diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 11a14256801a..000000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,89 +0,0 @@ -repos: -- repo: meta - hooks: - - id: check-hooks-apply - - id: check-useless-excludes - -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 - hooks: - - id: check-case-conflict - - id: check-json - - id: check-symlinks - - id: check-yaml - - id: destroyed-symlinks - - id: end-of-file-fixer - exclude: docs/CNAME - - id: fix-byte-order-marker - - id: fix-encoding-pragma - args: [--remove] - - id: mixed-line-ending - args: [--fix=lf] - - id: requirements-txt-fixer - - id: trailing-whitespace - -- repo: https://github.com/google/yapf - rev: v0.32.0 - hooks: - - id: yapf - -- repo: https://gitlab.com/daverona/pre-commit/cpp - rev: 0.8.0 - hooks: - - id: clang-format # formatter of C/C++ code based on a style guide: LLVM, Google, Chromium, Mozilla, and WebKit available - args: [] - -- repo: local - hooks: - - id: check-torchdist - name: check-torchdist - entry: ./scripts/check-torchdist.py - language: python - exclude: ^(deepspeed/comm/|docs/|benchmarks/|scripts/check-torchdist.py|deepspeed/moe/sharded_moe.py|deepspeed/runtime/comm/coalesced_collectives.py|deepspeed/elasticity/elastic_agent.py|deepspeed/launcher/launch.py|tests/unit/comm/test_dist.py) - # Specific deepspeed/ files are excluded for now until we wrap ProcessGroup in deepspeed.comm - -- repo: local - hooks: - - id: check-license - name: check-license - entry: ./scripts/check-license.py - language: python - files: \.(py|c|cpp|cu|cc|h|hpp|cuh|hip|tr)$ - exclude: ^(deepspeed/inference/v2/kernels/ragged_ops/blocked_flash|deepspeed/inference/v2/kernels/cutlass_ops/grouped_gemm) - -- repo: https://github.com/codespell-project/codespell - rev: v2.1.0 - hooks: - - id: codespell - args: [ - # Do not check files that are automatically generated - '--skip=docs/Gemfile.lock,tests/unit/gpt2-merges.txt,tests/unit/gpt2-vocab.json', - '--ignore-regex=\\n', # Do not count the 'n' in an escaped newline as part of a word - '--ignore-words-list=youn,unsupport,noe', # Word used in error messages that need rewording - --check-filenames, - --check-hidden - ] - -- repo: https://github.com/pycqa/flake8 - rev: 4.0.1 - hooks: - - id: flake8 - args: ['--config=.flake8'] - -- repo: local - hooks: - - id: check-torchcuda - name: check-torchcuda - entry: ./scripts/check-torchcuda.py - language: python - exclude: ^(.github/workflows/|scripts/check-torchcuda.py|docs/_tutorials/accelerator-abstraction-interface.md|accelerator/cuda_accelerator.py|deepspeed/inference/engine.py|deepspeed/model_implementations/transformers/clip_encoder.py|deepspeed/model_implementations/diffusers/vae.py|deepspeed/model_implementations/diffusers/unet.py|op_builder/spatial_inference.py|op_builder/transformer_inference.py|op_builder/builder.py|setup.py|tests/unit/ops/sparse_attention/test_sparse_attention.py) - # Specific deepspeed/ files are excluded for now until we wrap ProcessGroup in deepspeed.comm - -- repo: local - hooks: - - id: check-extraindexurl - name: check-extraindexurl - entry: ./scripts/check-extraindexurl.py - language: python - files: \.(yml|yaml|sh|py)$ - exclude: ^(scripts/check-extraindexurl.py) diff --git a/accelerator/hpu_accelerator.py b/accelerator/hpu_accelerator.py index dd87461696cf..e2fdb91afd94 100644 --- a/accelerator/hpu_accelerator.py +++ b/accelerator/hpu_accelerator.py @@ -3,6 +3,7 @@ # DeepSpeed Team +import functools import os import pkgutil import 
importlib @@ -17,6 +18,7 @@ def __init__(self): self._name = 'hpu' self._communication_backend_name = 'hccl' self._compile_backend = "hpu_backend" + self.apply_hpu_workarounds() try: import habana_frameworks.torch.hpu as hpu hpu.setDeterministic(True) @@ -27,6 +29,15 @@ def __init__(self): self.fp16_supported = None + def apply_hpu_workarounds(self): + + def update_wa_env_var(key, value): + if key not in os.environ.keys(): + os.environ[key] = value + + update_wa_env_var("PT_HPU_LAZY_ACC_PAR_MODE", "0") + update_wa_env_var("PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES", "0") + # Device APIs def is_synchronized_device(self): return False @@ -41,9 +52,8 @@ def handles_memory_backpressure(self): return True def device_name(self, device_index=None): - if device_index is None: - return 'hpu' - return 'hpu:{}'.format(device_index) + # ignoring device_index. + return 'hpu' def device(self, device_index=None): return torch.device(self.device_name(device_index)) @@ -194,33 +204,34 @@ def replay_graph(self, graph): return # Tensor operations + # TODO(SW-192865): Remove WA for tensor wrappers. @property def BFloat16Tensor(self): - return self.hpu.BFloat16Tensor + return functools.partial(torch.tensor, dtype=torch.bfloat16, device='hpu') @property def ByteTensor(self): - return self.hpu.ByteTensor + return functools.partial(torch.tensor, dtype=torch.uint8, device='hpu') @property def DoubleTensor(self): - return self.hpu.DoubleTensor + return functools.partial(torch.tensor, dtype=torch.double, device='hpu') @property def FloatTensor(self): - return self.hpu.FloatTensor + return functools.partial(torch.tensor, dtype=torch.float, device='hpu') @property def HalfTensor(self): - return self.hpu.HalfTensor + return functools.partial(torch.tensor, dtype=torch.half, device='hpu') @property def IntTensor(self): - return self.hpu.IntTensor + return functools.partial(torch.tensor, dtype=torch.int, device='hpu') @property def LongTensor(self): - return self.hpu.LongTensor + return functools.partial(torch.tensor, dtype=torch.long, device='hpu') def pin_memory(self, tensor, align_bytes=1): return tensor.pin_memory(self.device()) @@ -289,6 +300,14 @@ def get_op_builder(self, class_name): else: return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None + #shall be removed once moving to torch.compile + def wrap_in_hpu_graph(self, module): + if self.hpu.is_lazy(): + module = self.hpu.wrap_in_hpu_graph(module) + else: + print("Warning: hpu graphs in eager mode is not supported, ignoring") + return module + def build_extension(self): from torch.utils.cpp_extension import BuildExtension return BuildExtension @@ -297,7 +316,12 @@ def export_envs(self): return [] def visible_devices_envs(self): - return ['HABANA_VISIBLE_MODULES'] + # TODO SW-195658: remove WA to not return HABANA_VISIBLE_MODULES once SW-195657 is resolved + # Current way deepspeed set this env var is not applicable with all HPU instances + # User has to follow instructions in: + # https://docs.habana.ai/en/latest/PyTorch/Reference/PT_Multiple_Tenants_on_HPU/Multiple_Workloads_Single_Docker.html + # keeping CUDA_VISIBLE_DEVICES + return ['CUDA_VISIBLE_DEVICES'] #['HABANA_VISIBLE_MODULES'] def set_visible_devices_envs(self, current_env, local_accelerator_ids): for env in self.visible_devices_envs(): diff --git a/build.txt b/build.txt new file mode 100644 index 000000000000..d625b6085876 --- /dev/null +++ b/build.txt @@ -0,0 +1 @@ ++hpu.synapse.v1.18.0 diff --git a/csrc/transformer/inference/csrc/pt_binding.cpp 
b/csrc/transformer/inference/csrc/pt_binding.cpp index 1b9f91cd9c88..e7ab8bcabfbc 100644 --- a/csrc/transformer/inference/csrc/pt_binding.cpp +++ b/csrc/transformer/inference/csrc/pt_binding.cpp @@ -452,15 +452,16 @@ std::vector ds_softmax_context(at::Tensor& query_key_value, unsigned layer_id, unsigned num_layers, at::Tensor& alibi, - float rope_theta) + float rope_theta, + bool is_prompt, + std::optional token_idx, + std::optional position_ids) { unsigned bsz = query_key_value.size(0); unsigned seq_len = query_key_value.size(1); int k = query_key_value.size(2) / (heads + 2 * (num_kv > 0 ? num_kv : heads)); unsigned hidden_dim = heads * k; - bool is_prompt = (seq_len > 1); - if (is_prompt) InferenceContext::Instance().reset_tokens(seq_len); unsigned soft_len = InferenceContext::Instance().current_tokens(); @@ -2028,7 +2029,7 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) "DeepSpeed memory allocation for GPT inference with " #_name " (CUDA)"); \ m.def("dequantize_" #_name, \ &ds_dequantize<_dtype>, \ - "DeepSpeed dequantize with " #_name " (CUDA)") + "DeepSpeed dequantize with " #_name " (CUDA)"); DEF_OPS(fp32, float); DEF_OPS(fp16, __half); diff --git a/deepspeed/checkpoint/zero_checkpoint.py b/deepspeed/checkpoint/zero_checkpoint.py index 6730b93dfd4f..c5d4102859fb 100644 --- a/deepspeed/checkpoint/zero_checkpoint.py +++ b/deepspeed/checkpoint/zero_checkpoint.py @@ -105,7 +105,7 @@ def _strip_tensor_paddings(self, sd): if group_paddings[key] == 0: continue for state_name, state_value in group_state.items(): - if state_name != "step" and torch.is_tensor(state_value): + if state_name != "step" and torch.is_tensor(state_value) and state_value.dim(): raw_length = state_value.numel() - group_paddings[key] group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone() else: diff --git a/deepspeed/comm/torch.py b/deepspeed/comm/torch.py index 14d4f3847315..6dba3eb152f2 100644 --- a/deepspeed/comm/torch.py +++ b/deepspeed/comm/torch.py @@ -146,18 +146,15 @@ def init_process_group(self, backend, timeout, init_method, rank, world_size): world_size=world_size) self.using_mpi = torch.distributed.get_backend() == 'mpi' - @compiler.disable def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): op = self._reduce_op(op) return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op) - @compiler.disable def inference_all_reduce(self, tensor, op, group=None): if self.shm_comm_op == None or self.shm_comm_op.inference_all_reduce(tensor, op) == -1: op = self._reduce_op(op) return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=False) - @compiler.disable def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): """ proxy func to torch.distributed.all_reduce_coalesced, which is included in PyTorch 1.13 and above @@ -168,7 +165,6 @@ def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group op = self._reduce_op(op) return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op) - @compiler.disable def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): if DS_COMM_REDUCE_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -176,7 +172,6 @@ def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): return Noop() return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op) - @compiler.disable def reduce_scatter(self, output, 
input_list, op=ReduceOp.SUM, group=None, async_op=False): if DS_COMM_REDUCE_SCATTER_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -189,7 +184,6 @@ def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_ group=group, async_op=async_op) - @compiler.disable def broadcast(self, tensor, src, group=None, async_op=False): if DS_COMM_BROADCAST_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -198,7 +192,6 @@ def broadcast(self, tensor, src, group=None, async_op=False): else: return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) - @compiler.disable def all_gather(self, tensor_list, tensor, group=None, async_op=False): if DS_COMM_ALL_GATHER_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -207,7 +200,6 @@ def all_gather(self, tensor_list, tensor, group=None, async_op=False): else: return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op) - @compiler.disable def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False): if self.has_all_gather_into_tensor(): return self.all_gather_function(output_tensor=output_tensor, @@ -215,7 +207,6 @@ def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_ group=group, async_op=async_op) - @compiler.disable def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False): if DS_COMM_ALL_GATHER_OFF: if int(os.getenv('RANK', '0')) == 0: @@ -233,7 +224,6 @@ def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=Fals "please consider upgrading your pytorch installation.") pass - @compiler.disable def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False): """""" assert len(output_tensors) == len(input_tensors), "" @@ -257,7 +247,6 @@ def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_ else: reqs[-1].wait() - @compiler.disable def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False): if self.has_reduce_scatter_tensor(): return self.reduce_scatter_function(output_tensor, @@ -271,7 +260,6 @@ def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, gr "please consider upgrading your pytorch installation.") pass - @compiler.disable def all_to_all_single(self, output, input, @@ -286,27 +274,21 @@ def all_to_all_single(self, group=group, async_op=async_op) - @compiler.disable def all_to_all(self, output_tensor_list, input_tensor_list, group=None, async_op=False): return torch.distributed.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op) - @compiler.disable def send(self, tensor, dst, group=None, tag=0): return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag) - @compiler.disable def recv(self, tensor, src=None, group=None, tag=0): return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag) - @compiler.disable def isend(self, tensor, dst, group=None, tag=0): return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag) - @compiler.disable def irecv(self, tensor, src=None, group=None, tag=0): return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag) - @compiler.disable def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False): return torch.distributed.gather(tensor=tensor, gather_list=gather_list, @@ -314,7 +296,6 @@ def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False): group=group, async_op=async_op) - 
@compiler.disable def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False): return torch.distributed.scatter(tensor=tensor, scatter_list=scatter_list, @@ -322,13 +303,11 @@ def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False): group=group, async_op=async_op) - @compiler.disable def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None): if group is None: group = torch.distributed.GroupMember.WORLD return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids) - @compiler.disable def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False): if group is None: group = torch.distributed.GroupMember.WORLD diff --git a/deepspeed/inference/engine.py b/deepspeed/inference/engine.py index 5cdd99ff0b90..bc6a5faa45ff 100755 --- a/deepspeed/inference/engine.py +++ b/deepspeed/inference/engine.py @@ -13,6 +13,7 @@ from packaging import version as pkg_version from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine from deepspeed.utils.timer import SynchronizedWallClockTimer +from deepspeed.runtime.compiler import is_compile_supported from ..runtime.state_dict_factory import SDLoaderFactory from ..runtime.weight_quantizer import WeightQuantization @@ -29,6 +30,7 @@ from ..module_inject.auto_tp_model_utils import build_bloom_alibi_tensor, build_mpt_atten_bias_tensor, build_mpt_alibi_tensor, get_alibi_mask from ..ops.transformer.inference.ds_attention import DeepSpeedSelfAttention from ..model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference +from ..ops.transformer.inference.op_binding.workspace import WorkspaceOp DS_INFERENCE_ENABLED = False from torch import nn @@ -51,13 +53,8 @@ def __init__(self, model, config): DS_INFERENCE_ENABLED = True super().__init__() - - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. - from ..model_implementations.transformers.ds_transformer import inference_module - if inference_module is not None: - self.destroy() + self.workspace = WorkspaceOp() + self.destroy() self.module = model self._config = config @@ -180,22 +177,19 @@ def __init__(self, model, config): dist.broadcast(_rng_state, 0) get_accelerator().set_rng_state(_rng_state.cpu()) - if config.tensor_parallel.tp_size > 1: + if config.enable_cuda_graph and get_accelerator().device_name() == 'hpu': + self.module = get_accelerator().wrap_in_hpu_graph(self.module) + elif config.tensor_parallel.tp_size > 1: assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism" # Check if local CUDA graphs can be created in replacement modules self.local_cuda_graph = self._local_cuda_graph_used(self.module) + self._is_compiled = False def destroy(self): - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. 
- from ..model_implementations.transformers.ds_transformer import inference_module DeepSpeedTransformerInference.layer_id = 0 DeepSpeedSelfAttention.num_layers = 0 - if inference_module is not None: - inference_module.release_workspace() - inference_module = None + self.workspace.release_workspace() def profile_model_time(self, use_cuda_events=True): if not self.model_profile_enabled and not self._config.enable_cuda_graph: @@ -321,7 +315,7 @@ def _validate_args(self, mpu, replace_with_kernel_inject): if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)): raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}") - supported_dtypes = [None, torch.half, torch.int8, torch.float] + supported_dtypes = [None, torch.half, torch.int8, torch.float, torch.bfloat16] if self._config.dtype not in supported_dtypes: raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}") @@ -589,7 +583,8 @@ def forward(self, *inputs, **kwargs): **kwargs: variable length keyword arguments """ start = None - if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph: + if self.model_profile_enabled and (get_accelerator().device_name() == 'cuda' or get_accelerator().device_name() == 'hpu') and \ + self._config.enable_cuda_graph: get_accelerator().synchronize() start = time.time() @@ -634,3 +629,19 @@ def _generate(self, *inputs, **kwargs): ) return self.module.generate(*inputs, **kwargs) + + def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwargs={}) -> None: + """ + Compile the module using the specified backend and kwargs. + """ + if not is_compile_supported(): + raise RuntimeError("compile is not supported in your version of PyTorch.") + + if self._is_compiled: + return + self.module.compile(backend=backend, **compile_kwargs) + self._is_compiled = True + + @property + def is_compiled(self) -> bool: + return self._is_compiled diff --git a/deepspeed/launcher/runner.py b/deepspeed/launcher/runner.py index 0ca89dd0497a..821ddb42a28d 100755 --- a/deepspeed/launcher/runner.py +++ b/deepspeed/launcher/runner.py @@ -31,7 +31,7 @@ from deepspeed.accelerator import get_accelerator DLTS_HOSTFILE = "/job/hostfile" -EXPORT_ENVS = ['MLFLOW', 'PYTHON', 'MV2', 'UCX'] +EXPORT_ENVS = ['MLFLOW', 'NCCL', 'PYTHON', 'MV2', 'UCX'] EXPORT_ENVS += NEBULA_EXPORT_ENVS DEEPSPEED_ENVIRONMENT_NAME = os.getenv("DS_ENV_FILE", ".deepspeed_env") DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.'] diff --git a/deepspeed/model_implementations/transformers/ds_llama2.py b/deepspeed/model_implementations/transformers/ds_llama2.py index 7d9eb4113a8a..325bfb4f7e18 100644 --- a/deepspeed/model_implementations/transformers/ds_llama2.py +++ b/deepspeed/model_implementations/transformers/ds_llama2.py @@ -4,11 +4,8 @@ # DeepSpeed Team import torch -from deepspeed import comm as dist from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference -inference_module = None - class DeepSpeedLlama2Inference(DeepSpeedTransformerInference): """Initialize the DeepSpeed OPT Transformer Layer. 
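(Illustrative aside, not part of the patch.) The InferenceEngine.compile() entry point added in deepspeed/inference/engine.py above could be exercised roughly as sketched below; the toy model, inputs, and bf16 dtype are placeholder assumptions, and the call requires a PyTorch build where torch.compile is available:

import torch
import deepspeed

model = torch.nn.Sequential(torch.nn.Embedding(100, 64), torch.nn.Linear(64, 100))  # placeholder model
engine = deepspeed.init_inference(model, dtype=torch.bfloat16)  # bf16 is now accepted by _validate_args
if not engine.is_compiled:
    engine.compile()  # backend defaults to get_accelerator().get_compile_backend()
out = engine(torch.randint(0, 100, (1, 8)))  # placeholder inputs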
@@ -27,18 +24,10 @@ def forward(self, *args, **kwargs): input = args[0] input_mask = None - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False - get_present = True + self.allocate_workspace(input.size()) + # We set the prev key/value to None when there is a prompt if input.shape[1] > 1: self.layer_past = None diff --git a/deepspeed/model_implementations/transformers/ds_transformer.py b/deepspeed/model_implementations/transformers/ds_transformer.py index d87d0de997b5..d30416cc5842 100644 --- a/deepspeed/model_implementations/transformers/ds_transformer.py +++ b/deepspeed/model_implementations/transformers/ds_transformer.py @@ -6,19 +6,18 @@ import torch import torch.nn as nn from deepspeed import comm as dist +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from deepspeed.utils.logging import log_dist from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder import deepspeed if deepspeed.HAS_TRITON: from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention -inference_module = None - class DeepSpeedTransformerInference(nn.Module): """Initialize the DeepSpeed Transformer Layer. @@ -37,6 +36,7 @@ class DeepSpeedTransformerInference(nn.Module): for specific downstream tasks. 
""" layer_id = 0 + workspace = None def __init__(self, config, @@ -52,10 +52,6 @@ def __init__(self, DeepSpeedTransformerInference.layer_id += 1 data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedTransformerInference.layer_id == 1: log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0]) @@ -87,23 +83,26 @@ def __init__(self, requires_grad=False) self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device), requires_grad=False) - self.layer_past = None - try: - if config.dtype == torch.float32: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - elif config.dtype == torch.bfloat16: - self.allocate_workspace = inference_module.allocate_workspace_bf16 - else: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - self._alloc_workspace = True - except AttributeError: - self.allocate_workspace = None - self._alloc_workspace = False + self.layer_norm = LayerNormOp() + DeepSpeedTransformerInference.workspace = WorkspaceOp(self.config) + self._should_allocate_workspace = True + self.allocate_workspace_func = self.workspace.allocate_workspace + + def allocate_workspace(self, size): + # Allocate memory only on first layer forward + if self.config.layer_id == 0 and self._should_allocate_workspace: + self.allocate_workspace_func(self.config.hidden_size, self.config.heads, size[1], size[0], + DeepSpeedTransformerInference.layer_id, self.config.mp_size, + self.config.bigscience_bloom, + dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, + self.config.min_out_tokens) + self._should_allocate_workspace = False @classmethod def reset_cache(cls): - if inference_module is not None: - inference_module.reset_cache() + if cls.workspace is None: + cls.workspace = WorkspaceOp() + cls.workspace.reset_cache() def forward( self, @@ -136,23 +135,11 @@ def forward( input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False + self.allocate_workspace(input.size()) get_present = (get_present or get_key_value or use_cache) input_mask = input_mask if attention_mask is None else attention_mask - - # We set the prev key/value to None when there is a prompt - if input.shape[1] > 1: - self.layer_past = None - layer_past = layer_past if layer_past is not None else self.layer_past + layer_past = past_key_value if past_key_value is not None else layer_past head_mask = layer_head_mask if layer_head_mask is not None else head_mask attn_mask = None @@ -178,14 +165,14 @@ def forward( output_attentions, self.norm_w, self.norm_b, - alibi) + alibi, + **kwargs) presents = (key, value) - self.layer_past = presents if layer_past is None else None output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob) if not self.config.pre_layer_norm: - output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon) + output = self.layer_norm(output, 
self.norm_w, self.norm_b, self.config.epsilon) output = output.to(input_type) if get_present: diff --git a/deepspeed/module_inject/auto_tp.py b/deepspeed/module_inject/auto_tp.py index 1c76cbc0a6ef..7c37b336ee9e 100644 --- a/deepspeed/module_inject/auto_tp.py +++ b/deepspeed/module_inject/auto_tp.py @@ -134,7 +134,7 @@ def is_load_module(module): load_layer_names = [ "LPLayerNorm", "SharedEmbedding", "OPTLearnedPositionalEmbedding", "LlamaRMSNorm", "FalconLinear", "MistralRMSNorm", "T5LayerNorm", "MixtralRMSNorm", "Phi3RotaryEmbedding", "Phi3SuScaledRotaryEmbedding", - "Phi3RMSNorm", "YuanRMSNorm", "YuanRotaryEmbedding" + "Phi3RMSNorm", "YuanRMSNorm", "YuanRotaryEmbedding", "Qwen2RMSNorm" ] return module.__class__ in load_layers or module._get_name() in load_layer_names diff --git a/deepspeed/module_inject/containers/base.py b/deepspeed/module_inject/containers/base.py index 83e109167ffe..ab6286325eed 100644 --- a/deepspeed/module_inject/containers/base.py +++ b/deepspeed/module_inject/containers/base.py @@ -203,6 +203,12 @@ def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b): self._4hh_b = _4hh_b def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb): + #TODO SW-164572: remove below mark_step WA once SW-164573 is resolved. + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + if thpu.is_initialized(): + import habana_frameworks.torch.core as htcore + htcore.mark_step() self.attn_nw = attn_nw self.attn_nb = attn_nb self.input_nw = input_nw diff --git a/deepspeed/module_inject/policy.py b/deepspeed/module_inject/policy.py index 41df2b85dc0c..d9cb8c2d9eb5 100644 --- a/deepspeed/module_inject/policy.py +++ b/deepspeed/module_inject/policy.py @@ -27,7 +27,7 @@ class DSPolicy(ABC): _orig_layer_class = None def __init__(self): - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True @abstractmethod def attention(self): @@ -62,7 +62,7 @@ def __init__( # Type of normalization to perform norm_type=NormType.LayerNorm): super().__init__() - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True self.inference = inference self.linear_layer = linear_layer self.scale_attention = scale_attention diff --git a/deepspeed/module_inject/tp_shard.py b/deepspeed/module_inject/tp_shard.py index 6758c7a657f6..a2b0d5edb927 100644 --- a/deepspeed/module_inject/tp_shard.py +++ b/deepspeed/module_inject/tp_shard.py @@ -5,6 +5,8 @@ from deepspeed import comm as dist global num_kv_heads +# TODO: SW-184584 remove this WA. +is_old_shard_size = None def set_num_kv_heads(num): @@ -34,12 +36,17 @@ def get_num_attention_heads(): def get_shard_size(total_size, mp_size, name=None, rank=None): global num_kv_heads + # TODO: SW-184584 remove this WA. 
+ global is_old_shard_size + if is_old_shard_size is None: + import os + is_old_shard_size = os.environ.get("HPU_DS_OLD_SHARD_SIZE", "1").lower() in ["true", "1"] last_linear = ["lm_head", "embed_out"] # When we have num_kv_heads defined, uneven division is possible, otherwise enforce near even division if rank == None: rank = dist.get_rank() - if num_kv_heads != None and total_size % num_kv_heads == 0 and "mlp" not in str(name) and str( - name) not in last_linear: + if num_kv_heads != None and (is_old_shard_size or (total_size % num_kv_heads == 0 and "mlp" not in str(name) + and str(name) not in last_linear)): my_slices = (num_kv_heads // mp_size) + (1 if rank < (num_kv_heads % mp_size) else 0) return total_size * my_slices // num_kv_heads else: diff --git a/deepspeed/moe/capacity_bins.py b/deepspeed/moe/capacity_bins.py new file mode 100644 index 000000000000..adff9f851be9 --- /dev/null +++ b/deepspeed/moe/capacity_bins.py @@ -0,0 +1,345 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import torch +from typing import Union +from deepspeed import comm as dist +from deepspeed.utils import groups +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size + + +class CapacityBins(torch.nn.Module): + """ CapacityBins - maps current capacity value into capacity bins. + + When using drop_tokens=false, the capacity at each iteration will differ since + we use a capacity to accommodate for the largest number of tokens sent to an expert. + This creates dynamic shapes tensors. + + The motivation for using bins is to reduce the dynamic shapes to a limited set, hence + being more friendly when running in non-eager mode (e.g., using compile). + + The minimum range of capacity is the optimal capacity where all tokens are evenly routed + among all experts. The maximum range of capacity is the worst-case capacity where all + tokens are routed to a single expert (unlikely, but a valid upper bound). + + This class maintains the current configured capacity bins. It also tracks bins usage info + which enables to dynamically update the capacity bins to optimize performance (i.e. to + minimize the number of dummy extra tokens that are routed). + + Upon initialization, if configured_bins provided, use configured_bins to initialize the bins. + Otherwise, the capacity bins are initialized to bins with exponentially growing width. + + Argument use_cpu forces capacity bins logic to be executed on the CPU (not on the accelerator). + When using torch.compile, this prevents potential graph breaks. 
+ """ + + def __init__(self, + k: int, + num_experts: int, + num_capacity_bins: int, + capacity_bins_exp_base: float, + capacity_bins_alignment: int, + min_bin_size: int = 1, + configured_bins: Union[list, None] = None, + use_cpu=True) -> None: + super().__init__() + self.k = k + self.num_experts = num_experts + self.num_capacity_bins = num_capacity_bins + self.capacity_bins_exp_base = capacity_bins_exp_base + self.configured_alignment = capacity_bins_alignment + assert min_bin_size > 0, f'CapacityBins min_bin_size must be > 0, got {min_bin_size}' + self.min_bin_size = min_bin_size + if configured_bins is not None: + assert len(configured_bins) == self.num_capacity_bins, \ + f'Configured bins ({configured_bins}) does not match num capacity bins ({self.num_capacity_bins})' + assert all(bin_edge > 0 for bin_edge in configured_bins), \ + 'Configured bin edges must be > 0' + assert all(configured_bins[i] < configured_bins[i+1] for i in range(len(configured_bins)-1)), \ + 'Configured bin edges must be a strictly increasing list' + self.use_cpu = use_cpu + + # initialize usage stats + zero_bins = torch.zeros(num_capacity_bins, dtype=torch.long, device='cpu', requires_grad=False) + self.register_buffer('bins_usage', zero_bins.clone().detach()) + self.register_buffer('bins_usage_last', zero_bins.clone().detach()) + + # initialize bin edges + if configured_bins is not None: + self.register_buffer('capacity_bins', + torch.tensor(configured_bins, dtype=torch.long, device='cpu', requires_grad=False)) + else: + # we don't know the range of the capacity bins, therefore we create a zeroed tensor + # when we load from checkpoint, or during the first forward, we update the bins + # note that if the first element =0, it marks that capacity_bins is not initialized + self.register_buffer('capacity_bins', zero_bins.clone().detach()) + + # attribute self.device is the device to use for capacity bins logic, where attribute self.model_device + # is the device used by the model. attributes can be different in case use_cpu is configured. 
+ self.device = None + self.model_device = None + + self.min_tokens_per_expert = None + self.max_tokens_per_expert = None + self.alignment = None + + def set_bins(self, bins: list): + with (torch.no_grad()): + # set the new capacity bins and clear the usage stats (not relevant for new bins) + self.capacity_bins.copy_(torch.tensor(bins, dtype=torch.long, device=self.device)) + self.bins_usage.zero_() + self.bins_usage_last.zero_() + + def get_stats(self, incremental=True): + + def is_usage_data_available(usage_tensor): + with torch.no_grad(): + return usage_tensor.sum().item() > 0 + + if not is_usage_data_available(self.bins_usage): + return None + + with torch.no_grad(): + # reduce stats across all workers; for that, we need to temporarily move stats to model device + bins_usage = self.bins_usage.clone().detach().to(self.model_device) + dist.all_reduce(bins_usage, op=dist.ReduceOp.SUM, group=groups._get_data_parallel_group()) + bins_usage = bins_usage.to(self.device) + + # incremental returns only the diff from last activation of get_stats() + if incremental: + delta_bins_usage = bins_usage + if is_usage_data_available(self.bins_usage_last): + delta_bins_usage -= self.bins_usage_last + self.bins_usage_last.copy_(bins_usage) + bins_usage = delta_bins_usage + + # stats are returned using cpu tensors + bins_usage = bins_usage.to('cpu') + bins_usage_list = bins_usage.tolist() + bins_edges = self.capacity_bins.clone().detach().to('cpu') + bins_edges_list = bins_edges.tolist() + stats = { + 'min_range': self.min_tokens_per_expert, + 'max_range': self.max_tokens_per_expert, + 'alignment': self.alignment, + 'min_bin_size': self.min_bin_size if self.min_bin_size is not None else 0, + 'edges': bins_edges, + 'usage': bins_usage, + 'summary': {f'bin{i}_{bins_edges_list[i]}': bins_usage_list[i] + for i in range(len(bins_usage))} + } + return stats + + def _save_device(self, device: str): + if self.device is None: + # set self.device to requested device for capacity bins logic. also keep device used by model + assert self.model_device is None, f'Expected model_device=None on 1st forward, but got {self.model_device}' + self.model_device = device + self.device = 'cpu' if self.use_cpu else self.model_device + + # move all model's buffers to device used for capacity bins logic + self.capacity_bins = self.capacity_bins.to(self.device) + self.bins_usage = self.bins_usage.to(self.device) + self.bins_usage_last = self.bins_usage_last.to(self.device) + + def get_binned_capacity(self, gate_output, capacity, update_stats=True): + with torch.no_grad(): + # on first forward, capture device used + # then, move inputs to requested capacity bins device + self._save_device(gate_output.device) + gate_output, capacity = gate_output.to(self.device), capacity.to(self.device) + + # get bins; if first call, calculate bins + bins = self._get_capacity_bins(gate_output.shape[0], gate_output.device) + + # find bin to use based on current capacity and update stats + index = torch.searchsorted(bins, capacity, right=False) + if update_stats: + self._update_stats(index) + + return bins[index].to(self.model_device) + + def _update_stats(self, index): + # currently we maintain stats for training only + if self.training: + self.bins_usage[index] += 1 + + def _generate_bins(self, force_start_bin=False): + # create exponentially growing width bins, and normalize width sum to 1.0 + # when force_start_bin=True, we force the first bin value = start range (aka start). 
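# Illustrative numeric sketch (assumed values, not part of the patch) of the default
# force_start_bin=False edge computation below: with num_capacity_bins=4, exp_base=2.0,
# start=100, stop=500 and alignment=64, the raw widths [1, 2, 4, 8] give cumulative
# fractions [1/15, 3/15, 7/15, 1.0] and edges ~[127, 180, 287, 500], which ceil-align
# to [128, 192, 320, 512].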
+ # force_start_bin=True is handled by prepending width=0 + start = self.min_tokens_per_expert + stop = self.max_tokens_per_expert + exp_base = torch.tensor(self.capacity_bins_exp_base, dtype=torch.float).to(self.device) + if force_start_bin: + bin_widths = exp_base**torch.arange(0, self.num_capacity_bins - 1, device=self.device) + bin_widths = torch.cat([torch.tensor([0.], device=bin_widths.device), bin_widths]) + else: + bin_widths = exp_base**torch.arange(0, self.num_capacity_bins, device=self.device) + normalized_bin_widths = bin_widths / torch.sum(bin_widths) + + # calculate bin edges by accumulating the bins width and scaling to [start...stop] range + # finally, align bin edges + bin_edges = torch.cumsum(normalized_bin_widths, dim=0) + bin_edges = start + (stop - start) * bin_edges + bin_edges = torch.ceil(bin_edges / self.alignment).mul(self.alignment).to(torch.long) + + # verify that we got N distinct capacity bins + assert len(set(bin_edges.tolist())) == self.num_capacity_bins, \ + f'Resulting capacity bins size != {self.num_capacity_bins}, bins={bin_edges.tolist()}. ' \ + f'Please try to reduce exponent base value with HL_CAPACITY_BINS_EXP_BASE ' \ + f'(current value: {exp_base.item()}, minimal value: 1.0). ' \ + f'If this is insufficient, limit the number of capacity bins with ' \ + f'HL_MOE_NUM_CAPACITY_BINS (set to {self.num_capacity_bins}) or reduce alignment with ' \ + f'HL_MOE_CAPACITY_BINS_ALIGNMENT (set to {self.alignment}).' + + return bin_edges + + def _verify_configured_bins(self): + """ This method runs once (at first forward) and verifies that configured bins are valid """ + # verify configured bins range + if (self.capacity_bins[0].item() < self.min_tokens_per_expert + or self.capacity_bins[-1].item() < self.max_tokens_per_expert): + print( + f'Invalid capacity_bins={self.capacity_bins.clone().detach().cpu().tolist()}, tokens per expert (min,max)={(self.min_tokens_per_expert, self.max_tokens_per_expert)}' + ) + return False + # verify configured bins alignment + alignment = torch.tensor(self.alignment, dtype=torch.long, device=self.device) + if torch.remainder(self.capacity_bins, alignment).sum().item() != 0: + print( + f'Invalid capacity_bins={self.capacity_bins.clone().detach().cpu().tolist()}, alignment={self.alignment} ' + ) + return False + return True + + def _get_capacity_bins(self, size: int, device: str) -> Union[torch.Tensor, None]: + """ Generates capacity bins with exponentially growing width. + + During training, we encourage tokens to be evenly routed (via aux loss). + Therefore, generate bins with exponentially growing bin widths, i.e., bins that are + closer to the start are smaller and thus have less extra non-required capacity. + + Alignment is required when the bins have to be aligned on a specific value. + For example: + 1. Configured alignment (capacity_bins_alignment) due to e.g. hardware specific considerations + 2. When the non-experts are using TP and the experts are not using TP, we + need to align the bins on TP boundary.
+ + Args: + gate_output (torch.Tensor): router gating function output tensor + + Returns: + bins tensor (torch.Tensor dtype=torch.long) + """ + # in case of first forward, initialize information based on gate_output + if self.min_tokens_per_expert is None: + # calculate optimal and worst case (min and max) tokens per expert + n_tokens_in_micro_batch = torch.tensor(size, device=device).to(torch.long) + n_optimal_tokens_per_expert = torch.ceil(self.k * n_tokens_in_micro_batch / self.num_experts).to( + torch.long) + self.min_tokens_per_expert = n_optimal_tokens_per_expert.item() + self.max_tokens_per_expert = n_tokens_in_micro_batch.item() + # handle bin alignment - maximum between configured alignment and TP (if used) + tp_alignment = 1 + if groups._get_expert_model_parallel_world_size() == 1 and groups.mpu is not None: + tp_alignment = bwc_tensor_model_parallel_world_size(groups.mpu) + self.alignment = max(self.configured_alignment, tp_alignment) + + # if bins configured (either configured by user or loaded from checkpoint) - verify valid bins + # otherwise, initialize bins + if self.capacity_bins[0] > 0: + if self.training and not self._verify_configured_bins(): + # temporary WA for diff in parameters such as seql, bs (number of tokens per expert change) after load from checkpoint + self.capacity_bins = self._generate_bins() + else: + self.capacity_bins = self._generate_bins() + + return self.capacity_bins + + +def optimize_bins(min_range, bins: torch.Tensor, bins_usage: torch.Tensor, alignment, min_bin_size) -> list: + """ Optimize MOE capacity bins according to collected bins usage statistics + + The bins are optimized to minimize the cost of binning. + The cost of each bin is defined as the additional tokens processed in this bin. + Since we don't have the actual capacities that were mapped to each bin, we use the median of the bin. + After we calculate the cost of all bins, we iteratively try to replace the lowest and highest cost bins + with 2 bins: the original highest cost bin and the median of the highest cost bin. + This way, we keep the number of bins constant while decreasing the overall cost of binning. 
+ + For example: + Given bins [150, 200, 250, 300] with start of range=100 + And usage [100, 0, 50, 10 ] + + We first calculate the cost of each bin: + Cost: [25*100, 25*0, 25*50, 25*10] = [2500, 0, 1250, 250] + + Lowest cost bin is 200 (index=1) + Highest cost bin is 150 (index=0) + + First iteration of optimization: + Remove bin1 and split bin0 --> [125, 150, 250, 300] + """ + + def align_to(value): + return int(math.ceil(value / alignment) * alignment) + + # sort bins by their cost of usage (we want to split high cost bins) + # we assume that for each bin, the cost is 1/2 of its width * usage count + shifted_bins = torch.cat([torch.tensor([min_range], dtype=bins.dtype, device=bins.device), bins[:-1]]) + width = bins - shifted_bins + cost = bins_usage * width / 2.0 + sorted_cost = torch.argsort(cost, descending=False, stable=True).tolist() + + # sorted cost is in ascending order + # min_sort_idx is current index into sorted_cost for candidate bin to be removed + # max_sort_idx is current index into sorted_cost for candidate bin to be split + bins = bins.tolist() + n_bins = len(bins) + min_sort_idx = 0 + max_sort_idx = n_bins - 1 + new_bins = [] + while min_sort_idx <= max_sort_idx: + # if same cost, keep all remaining bins and exit + # this also handles the case of min_sort_idx == max_sort_idx + min_cost = cost[sorted_cost[min_sort_idx]] + max_cost = cost[sorted_cost[max_sort_idx]] + if min_cost == max_cost: + bin_indexes = sorted_cost[min_sort_idx:max_sort_idx + 1] + new_bins.extend([bins[idx] for idx in bin_indexes]) + break + + # last bin can't be removed + min_bin_idx = sorted_cost[min_sort_idx] + if min_bin_idx == (n_bins - 1): + new_bins.append(bins[min_bin_idx]) + min_sort_idx += 1 + continue + + # calculate the left & right bin's width of the candidate bin after we split it to 2 + # verify that both left & right will meet the min bin size requirement + max_bin_idx = sorted_cost[max_sort_idx] + max_bin_start = min_range if max_bin_idx == 0 else bins[max_bin_idx - 1] + max_bin_end = bins[max_bin_idx] + mid_point = (max_bin_start + max_bin_end) // 2 + mid_point = align_to(mid_point) + left_bin_width = mid_point - max_bin_start + right_bin_width = max_bin_end - mid_point + if left_bin_width < min_bin_size or right_bin_width < min_bin_size: + new_bins.append(bins[max_bin_idx]) + max_sort_idx -= 1 + continue + + # skip min cost bin and split max cost bin + new_bins.append(mid_point) + new_bins.append(max_bin_end) + min_sort_idx += 1 + max_sort_idx -= 1 + + # sort the bins in ascending order + bins = sorted(new_bins) + return bins diff --git a/deepspeed/moe/layer.py b/deepspeed/moe/layer.py index 6777788ab885..c2459e2d6afb 100644 --- a/deepspeed/moe/layer.py +++ b/deepspeed/moe/layer.py @@ -10,8 +10,10 @@ from torch.nn import functional as F from deepspeed.utils import groups, log_dist +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size from .experts import Experts from .sharded_moe import MOELayer, TopKGate +from deepspeed.accelerator import get_accelerator class MoE(nn.Module): @@ -33,6 +35,10 @@ class MoE(nn.Module): use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed). 
enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts top2_2nd_expert_sampling (bool, optional): default=True, whether to perform sampling for 2nd expert + num_capacity_bins (int, optional): default=0, number of capacity bins to use in case of drop_tokens=False + capacity_bins_exp_base (float, optional): default=2.0, in case of capacity bins, exponential growing factor for bin width + capacity_bins_alignment (int, optional): default=1, in case of capacity bins, required bins alignment + configured_capacity_bins (list, optional): default=None, explicit configuration of capacity bin edges """ def __init__(self, @@ -50,7 +56,12 @@ def __init__(self, use_rts: bool = True, use_tutel: bool = False, enable_expert_tensor_parallelism: bool = False, - top2_2nd_expert_sampling: bool = True) -> None: + top2_2nd_expert_sampling: bool = True, + sequence_parallel: bool = False, + num_capacity_bins: int = 0, + capacity_bins_exp_base: float = 2.0, + capacity_bins_alignment: int = 1, + configured_capacity_bins: Optional[list] = None) -> None: super(MoE, self).__init__() @@ -61,7 +72,10 @@ def __init__(self, self.expert_group_name = f"ep_size_{self.ep_size}" self.num_experts = num_experts self.num_local_experts = num_experts // self.ep_size - + self.sequence_parallel = sequence_parallel + self.drop_tokens = drop_tokens + #TODO SW-179530: remove workaround when issue with lazy is resolved (see SW-179530). + expert.to(get_accelerator().device_name()) log_dist( f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}', [0]) @@ -70,14 +84,28 @@ def __init__(self, 'Unsupported noisy_gate_policy: ' + noisy_gate_policy experts = Experts(expert, self.num_local_experts, self.expert_group_name) - self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor, - min_capacity, noisy_gate_policy, drop_tokens, use_rts, None, - top2_2nd_expert_sampling), + self.deepspeed_moe = MOELayer(TopKGate(hidden_size, + num_experts, + k, + capacity_factor, + eval_capacity_factor, + min_capacity, + noisy_gate_policy, + drop_tokens, + use_rts, + None, + top2_2nd_expert_sampling, + self.sequence_parallel, + num_capacity_bins, + capacity_bins_exp_base, + capacity_bins_alignment, + configured_bins=configured_capacity_bins), experts, self.expert_group_name, self.ep_size, self.num_local_experts, - use_tutel=use_tutel) + use_tutel=use_tutel, + sequence_parallel=self.sequence_parallel) if self.use_residual: self.mlp = expert # coefficient is used for weighted sum of the output of expert and mlp @@ -87,20 +115,31 @@ def set_deepspeed_parallelism(self, use_data_before_expert_parallel_: bool = Fal self._create_process_groups(use_data_before_expert_parallel_=use_data_before_expert_parallel_) def _create_process_groups(self, use_data_before_expert_parallel_: bool = False) -> None: + # For sequence-parallel + expert-tp + no token-dropping, create a process group with ranks of EP + TP. + # This group is required to reduce_max the local token capacity across EP + TP ranks. 
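# Illustrative construction sketch (assumed sizes, not part of the patch) showing how the new
# capacity-bins arguments documented above might be passed to MoE; it assumes an initialized
# distributed environment and uses drop_tokens=False, since bins only apply in that mode.
import torch
from deepspeed.moe.layer import MoE

expert = torch.nn.Sequential(torch.nn.Linear(1024, 4096), torch.nn.GELU(), torch.nn.Linear(4096, 1024))
moe = MoE(hidden_size=1024,
          expert=expert,
          num_experts=8,
          ep_size=2,
          k=1,
          drop_tokens=False,           # capacity bins are only used when tokens are not dropped
          num_capacity_bins=8,         # map dynamic capacities onto 8 fixed bin edges
          capacity_bins_exp_base=2.0,  # exponentially growing bin widths
          capacity_bins_alignment=64)  # align edges to a hardware-friendly multiple
moe.set_deepspeed_parallelism()        # creates the expert (and, when enabled, EP+TP) process groups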
+ tp_enabled = bwc_tensor_model_parallel_world_size(groups.mpu) > 1 + expert_tp_enabled = self.enable_expert_tensor_parallelism and tp_enabled + use_ep_tp_group = self.sequence_parallel and not self.drop_tokens and expert_tp_enabled + # Create process group for a layer if needed if self.expert_group_name not in groups._get_expert_parallel_group_dict(): print(f"No existing process group found, creating a new group named: {self.expert_group_name}") - if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism): - # Condition 1 - no groups.mpu means no tensor parallelism - # Condition 2 - disabling expert tensor parallelism on purpose + if not expert_tp_enabled: + # expert tensor parallelism is disabled, use only expert parallelism and data parallelism groups._create_expert_and_data_parallel( self.ep_size, use_data_before_expert_parallel_=use_data_before_expert_parallel_) else: - # expert tensor parallelism is enabled + # expert tensor parallelism is enabled, use expert, data and tensor parallelism groups._create_expert_data_and_model_parallel( - self.ep_size, mpu=groups.mpu, use_data_before_expert_parallel_=use_data_before_expert_parallel_) + self.ep_size, + mpu=groups.mpu, + use_data_before_expert_parallel_=use_data_before_expert_parallel_, + create_expert_tensor_parallel_group=use_ep_tp_group) + # Set the group handle for the MOELayer (deepspeed_moe) object self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name)) + if use_ep_tp_group: + self.deepspeed_moe._set_ep_tp_group(groups._get_expert_tensor_parallel_group(self.expert_group_name)) def forward(self, hidden_states: torch.Tensor, diff --git a/deepspeed/moe/sharded_moe.py b/deepspeed/moe/sharded_moe.py index 96eab5e2ab17..21b06c5dfb96 100644 --- a/deepspeed/moe/sharded_moe.py +++ b/deepspeed/moe/sharded_moe.py @@ -26,6 +26,7 @@ import torch.nn.functional as F from deepspeed.utils import groups from .mappings import drop_tokens, gather_tokens +from .capacity_bins import CapacityBins if TYPE_CHECKING: Base = Module[Tensor] @@ -178,6 +179,12 @@ def _one_hot_to_float(x, num_classes): return F.one_hot(x, num_classes=num_classes).float() +def _calculate_expert_weight(gates: Tensor, mask: Tensor, locations: Tensor, capacity: Tensor) -> Tensor: + gates = einsum("s,se->se", gates, mask) + locations = _one_hot_to_float(locations, capacity) + return einsum("se,sc->sec", gates, locations) + + def top1gating(logits: Tensor, capacity_factor: float, min_capacity: int, @@ -186,7 +193,9 @@ def top1gating(logits: Tensor, drop_tokens: bool = True, use_rts: bool = True, ep_group: Union[torch.distributed.ProcessGroup, None] = None, - use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + use_tutel: bool = False, + ep_tp_group: Union[torch.distributed.ProcessGroup, None] = None, + capacity_bins: Union[CapacityBins, None] = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top1Gating on logits.""" if noisy_gate_policy == 'RSample': logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) @@ -206,13 +215,14 @@ def top1gating(logits: Tensor, mask1 = einsum("s,se->se", used_token, mask1) # gating decisions - exp_counts = torch.sum(mask1, dim=0).detach().to('cpu') + exp_counts = torch.sum(mask1, dim=0) # if we don't want to drop any tokens if not drop_tokens: - new_capacity = torch.max(exp_counts).to(logits.device) + new_capacity = torch.max(exp_counts) # Communicate across expert processes to pick the maximum capacity. 
- if ep_group is not None: + group = ep_tp_group if ep_tp_group is not None else ep_group + if group is not None: dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) if groups._get_expert_model_parallel_world_size() == 1: # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. @@ -221,6 +231,9 @@ def top1gating(logits: Tensor, new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) # Make sure the capacity value does not exceed the number of tokens. capacity = min(new_capacity, torch.tensor(mask1.size(0)).to(new_capacity.device)) + if capacity_bins is not None: + capacity = capacity_bins.get_binned_capacity(gate_output=logits, capacity=capacity) + exp_counts = exp_counts.detach().to('cpu') # Compute l_aux me = torch.mean(gates, dim=0) @@ -290,7 +303,9 @@ def top2gating(logits: Tensor, min_capacity: int, drop_tokens: bool = True, ep_group: Union[torch.distributed.ProcessGroup, None] = None, - top2_2nd_expert_sampling: bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + top2_2nd_expert_sampling: bool = True, + ep_tp_group: Union[torch.distributed.ProcessGroup, None] = None, + capacity_bins: Union[CapacityBins, None] = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top2Gating on logits.""" # everything is in fp32 in this function gates = F.softmax(logits, dim=1) @@ -303,7 +318,7 @@ def top2gating(logits: Tensor, if top2_2nd_expert_sampling: # Create a mask for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ - logits += gumbel_rsample(logits.shape, device=logits.device) + logits = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1 = logits.masked_fill(mask1.bool(), float("-inf")) @@ -332,14 +347,17 @@ def top2gating(logits: Tensor, else: # Do not drop tokens - set capacity according to current expert assignments new_capacity = torch.max(exp_counts) + group = ep_tp_group if ep_tp_group is not None else ep_group if ep_group is not None: - dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) + dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=group) if groups._get_expert_model_parallel_world_size() == 1: # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. # This is since we are going to activate drop_tokens() to drop duplicate tokens. 
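# Illustrative note (not from the patch): the rounding below pads the capacity up to a multiple
# of the TP degree, e.g. new_capacity=13 with tp=4 becomes ceil(13/4)*4 = 16; when capacity bins
# are configured, the result is then snapped up to the smallest configured bin edge >= capacity.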
tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu) new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) capacity = new_capacity + if capacity_bins is not None: + capacity = capacity_bins.get_binned_capacity(gate_output=logits, capacity=capacity) # Store the capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) @@ -357,13 +375,8 @@ def top2gating(logits: Tensor, gates2_s /= denom_s # Calculate combine_weights and dispatch_mask - gates1 = einsum("s,se->se", gates1_s, mask1_float) - gates2 = einsum("s,se->se", gates2_s, mask2_float) - locations1_sc = _one_hot_to_float(locations1_s, capacity) - locations2_sc = _one_hot_to_float(locations2_s, capacity) - combine1_sec = einsum("se,sc->sec", gates1, locations1_sc) - combine2_sec = einsum("se,sc->sec", gates2, locations2_sc) - combine_weights = combine1_sec + combine2_sec + combine_weights = _calculate_expert_weight(gates1_s, mask1_float, locations1_s, capacity) + combine_weights += _calculate_expert_weight(gates2_s, mask2_float, locations2_s, capacity) dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask, exp_counts.detach().to('cpu') @@ -398,7 +411,12 @@ def __init__(self, drop_tokens: bool = True, use_rts: bool = True, ep_group: Union[torch.distributed.ProcessGroup, None] = None, - top2_2nd_expert_sampling: bool = True) -> None: + top2_2nd_expert_sampling: bool = True, + sequence_parallel: bool = False, + num_capacity_bins: int = 0, + capacity_bins_exp_base: float = 2.0, + capacity_bins_alignment: int = 1, + configured_bins: Union[list, None] = None) -> None: super().__init__() # Only top-1 and top-2 are supported at the moment. @@ -406,6 +424,8 @@ def __init__(self, raise ValueError('Only top-1 and top-2 gatings are supported.') self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) self.ep_group = ep_group + self.ep_tp_group = None + self.num_experts = num_experts self.k = k self.capacity_factor = capacity_factor self.eval_capacity_factor = eval_capacity_factor @@ -417,6 +437,23 @@ def __init__(self, self.drop_tokens = drop_tokens self.use_rts = use_rts self.top2_2nd_expert_sampling = top2_2nd_expert_sampling + self.sequence_parallel = sequence_parallel + if self.sequence_parallel: + setattr(self.wg.weight, 'sequence_parallel', True) + self.capacity_bins = None + if not self.drop_tokens and num_capacity_bins > 0: + assert capacity_bins_exp_base >= 1.0, \ + f'capacity_bins_exp_base must be >= 1.0, but got {capacity_bins_exp_base}' + self.capacity_bins = CapacityBins(k, + num_experts, + num_capacity_bins, + capacity_bins_exp_base, + capacity_bins_alignment, + configured_bins=configured_bins) + + def _set_ep_tp_group(self, ep_tp_group): + assert self.ep_tp_group is None, f'Attempting to override an existing ep_tp_group' + self.ep_tp_group = ep_tp_group def _set_ep_group(self, ep_group): assert self.ep_group is None, f'Attempting to override an existing ep_group' @@ -439,11 +476,13 @@ def forward(self, if self.k == 1: gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, self.min_capacity, used_token, self.noisy_gate_policy if self.training else None, - self.drop_tokens, self.use_rts, self.ep_group, use_tutel) + self.drop_tokens, self.use_rts, self.ep_group, use_tutel, self.ep_tp_group, + self.capacity_bins) else: gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, - self.min_capacity, self.drop_tokens, self.ep_group, 
self.top2_2nd_expert_sampling) + self.min_capacity, self.drop_tokens, self.ep_group, self.top2_2nd_expert_sampling, + self.ep_tp_group, self.capacity_bins) if self.wall_clock_breakdown: self.timers(TOPK_GATE_TIMER).stop() @@ -451,6 +490,33 @@ def forward(self, return gate_output + def get_stats(self, incremental=True): + if self.capacity_bins is not None: + capacity_stats = self.capacity_bins.get_stats(incremental) + if capacity_stats is not None: + return {'capacity_bins': capacity_stats} + return None + + def has_capacity_bins(self): + return self.capacity_bins is not None + + def get_capacity_bins(self): + return self.capacity_bins + + +class AuxLossForSequenceParallel(torch.autograd.Function): + """ For sequence parallel, we need to divide the auxiliary loss gradient by TP. """ + + @staticmethod + def forward(ctx, aux_loss): + return aux_loss.clone() + + @staticmethod + def backward(ctx, grad_output): + grad_input = grad_output.clone() + grad_input *= (1.0 / bwc_tensor_model_parallel_world_size(mpu=groups.mpu)) + return grad_input + class MOELayer(Base): """MOELayer module which implements MixtureOfExperts as described in Gshard_. @@ -476,7 +542,8 @@ def __init__(self, ep_group_name, ep_size, num_local_experts: int, - use_tutel: bool = False) -> None: + use_tutel: bool = False, + sequence_parallel: bool = False) -> None: super().__init__() self.gate = gate self.experts = experts @@ -489,6 +556,8 @@ def __init__(self, self.time_moe = 0.0 self.timers = SynchronizedWallClockTimer() self.wall_clock_breakdown = False + self.sequence_parallel = sequence_parallel + self.l_aux = None self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1 @@ -505,6 +574,10 @@ def _set_ep_group(self, ep_group): self.ep_group = ep_group self.gate._set_ep_group(ep_group) + def _set_ep_tp_group(self, ep_tp_group): + self.ep_tp_group = ep_tp_group + self.gate._set_ep_tp_group(ep_tp_group) + def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: if self.wall_clock_breakdown: @@ -518,6 +591,8 @@ def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: # group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1 reshaped_input = input[0].reshape(-1, d_model) + expert_model_parallel_world_size = groups._get_expert_model_parallel_world_size() + if self.use_tutel: self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True) S, M = reshaped_input.size(0), reshaped_input.size(1) @@ -527,13 +602,16 @@ def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C) dispatched_input = self._tutel_dispatcher.encode(reshaped_input) else: - self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1]) + l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1]) dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input) + if self.sequence_parallel and expert_model_parallel_world_size > 1: + l_aux = AuxLossForSequenceParallel.apply(l_aux) + self.l_aux = l_aux if self.wall_clock_breakdown: self.timers(FIRST_ALLTOALL_TIMER).start() - if groups._get_expert_model_parallel_world_size() == 1: + if expert_model_parallel_world_size == 1: # If the non-expert is tensor-parallel, it will create # duplicate tokens on the tensor-parallel ranks. 
# Since our experts are not tensor-parallel, these duplicates diff --git a/deepspeed/ops/transformer/inference/config.py b/deepspeed/ops/transformer/inference/config.py index 9709328cc133..c0dd29f4f962 100644 --- a/deepspeed/ops/transformer/inference/config.py +++ b/deepspeed/ops/transformer/inference/config.py @@ -103,7 +103,6 @@ def __init__(self, self.return_tuple = return_tuple self.mlp_after_attn = mlp_after_attn self.mlp_act_func_type = mlp_act_func_type - self.specialized_mode = False self.training_mp_size = training_mp_size self.bigscience_bloom = bigscience_bloom self.max_out_tokens = max_out_tokens diff --git a/deepspeed/ops/transformer/inference/diffusers_attention.py b/deepspeed/ops/transformer/inference/diffusers_attention.py index 5efc560db75e..c384ee77c03d 100644 --- a/deepspeed/ops/transformer/inference/diffusers_attention.py +++ b/deepspeed/ops/transformer/inference/diffusers_attention.py @@ -10,10 +10,11 @@ from packaging import version as pkg_version from deepspeed.utils.logging import log_dist from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp +from deepspeed.ops.transformer.inference.op_binding.softmax_context import SoftmaxContextOp +from deepspeed.ops.transformer.inference.op_binding import LinearOp +from deepspeed.ops.transformer.inference.op_binding.pad_transform import PadTransformOp -# Cuda modules will be imported if needed -inference_module = None minus_inf = -10000.0 triton_flash_attn = None @@ -36,7 +37,8 @@ class DeepSpeedDiffusersAttentionFunction(Function): @staticmethod def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb, num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob, - do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel, rope_theta): + do_out_bias, score_context_func, linear_func, pad_transform_func, triton_flash_attn_kernel, + rope_theta): def _transpose_for_context(x): x = x.permute(0, 2, 1, 3) @@ -77,7 +79,7 @@ def selfAttention_fp(input, context, input_mask): query = query.contiguous() key = key.contiguous() value = value.contiguous() - query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn) + query, key, value = pad_transform_func(query, key, value, config.heads, do_flash_attn) attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1) context_layer = _transpose_for_context(torch.matmul(attention_scores, value)) @@ -117,10 +119,6 @@ def __init__( data_type = self.config.dtype data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedDiffusersAttention.layer_id == 1: log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0]) @@ -171,26 +169,24 @@ def __init__( self.norm_factor *= math.sqrt(self.config.layer_id + 1) # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 - if self.config.dtype in [torch.float16, torch.int8]: - self.score_context_func = inference_module.softmax_context_fp16 - self.linear_func = inference_module.linear_layer_fp16 - self.allocate_workspace = inference_module.allocate_workspace_fp16 - else: - self.score_context_func = inference_module.softmax_context_fp32 - self.linear_func = 
inference_module.linear_layer_fp32 - self.allocate_workspace = inference_module.allocate_workspace_fp32 + self.allocate_workspace_func = WorkspaceOp(self.config).allocate_workspace + self.score_context_func = SoftmaxContextOp(self.config) + self.linear_func = LinearOp(self.config) + self.pad_transform_func = PadTransformOp(self.config) - def forward(self, input, context=None, input_mask=None): + def allocate_workspace(self, size): + # Allocate memory only on first layer forward if self.config.layer_id == 0: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, - 0, self.config.max_out_tokens, self.config.min_out_tokens) - output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw, - self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb, - self.num_attention_heads_per_partition, self.norm_factor, - self.hidden_size_per_partition, self.attn_ow, self.attn_ob, - self.do_out_bias, self.score_context_func, self.linear_func, - self.triton_flash_attn_kernel, self.config.rope_theta) + self.allocate_workspace_func(self.config.hidden_size, self.config.heads, size[1], size[0], + DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, 0, + self.config.max_out_tokens, self.config.min_out_tokens) + + def forward(self, input, context=None, input_mask=None): + self.allocate_workspace(input.size()) + output = DeepSpeedDiffusersAttentionFunction.apply( + input, context, input_mask, self.config, self.attn_qkvw, self.attn_qw, self.attn_kw, self.attn_vw, + self.attn_qkvb, self.num_attention_heads_per_partition, self.norm_factor, self.hidden_size_per_partition, + self.attn_ow, self.attn_ob, self.do_out_bias, self.score_context_func, self.linear_func, + self.pad_transform_func, self.triton_flash_attn_kernel, self.config.rope_theta) return output diff --git a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py index b0156f905a06..d01638f36e40 100644 --- a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py +++ b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py @@ -10,26 +10,9 @@ from .diffusers_attention import DeepSpeedDiffusersAttention from .bias_add import nhwc_bias_add from .diffusers_2d_transformer import Diffusers2DTransformerConfig -from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder from deepspeed.utils.types import ActivationFuncType - -# Ops will be loaded on demand -transformer_cuda_module = None -spatial_cuda_module = None - - -def load_transformer_module(): - global transformer_cuda_module - if transformer_cuda_module is None: - transformer_cuda_module = InferenceBuilder().load() - return transformer_cuda_module - - -def load_spatial_module(): - global spatial_cuda_module - if spatial_cuda_module is None: - spatial_cuda_module = SpatialInferenceBuilder().load() - return spatial_cuda_module +from .op_binding.gated_activation import GatedActivationOp +from .op_binding.layer_norm import LayerNormOp class DeepSpeedDiffusersTransformerBlock(nn.Module): @@ -76,8 +59,8 @@ def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerC else: self.attn_2_bias = nn.Paramaeter(torch.zeros_like(self.norm3_g), requires_grad=False) - self.transformer_cuda_module = load_transformer_module() - load_spatial_module() + self.gated_activation = GatedActivationOp() + self.layer_norm = 
LayerNormOp() def forward(self, hidden_states, context=None, timestep=None, **kwargs): # In v0.12.0 of diffuser, several new kwargs were added. Capturing @@ -88,17 +71,17 @@ def forward(self, hidden_states, context=None, timestep=None, **kwargs): if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None: context = kwargs["encoder_hidden_states"] - out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) + out_norm_1 = self.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) out_attn_1 = self.attn_1(out_norm_1) - out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_2, out_attn_1 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps) out_attn_2 = self.attn_2(out_norm_2, context=context) - out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_3, out_attn_2 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps) out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w) - out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) + out_geglu = self.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) out_ff2 = nn.functional.linear(out_geglu, self.ff2_w) return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2) diff --git a/deepspeed/ops/transformer/inference/ds_attention.py b/deepspeed/ops/transformer/inference/ds_attention.py index ffb58175daad..fc3a4664ea2e 100644 --- a/deepspeed/ops/transformer/inference/ds_attention.py +++ b/deepspeed/ops/transformer/inference/ds_attention.py @@ -89,7 +89,7 @@ def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device) ] - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] @@ -108,7 +108,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): no_masking=no_masking, layer_id=self.config.layer_id, num_layers=DeepSpeedSelfAttention.num_layers, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) context_layer, key_layer, value_layer = attn_key_value return context_layer, key_layer, value_layer @@ -136,7 +139,8 @@ def forward(self, output_attentions=False, norm_w=None, norm_b=None, - alibi=None): + alibi=None, + **kwargs): if self.attn_qkvw is None: self._attn_qkvw, self._attn_qkvb = self._merge_qkv() else: @@ -157,10 +161,17 @@ def forward(self, gamma=norm_w, beta=norm_b) + is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1) + token_idx = kwargs.get("token_idx", None) + position_ids = kwargs.get("position_ids", None) + context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out, input_mask=input_mask, layer_past=layer_past, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) inp_norm = qkv_out[-1] @@ -210,7 +221,7 @@ def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_ 
return tensor_list - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] @@ -246,8 +257,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): matmul_result = torch.matmul(query_layer, key_layer) # change view to [batch_size, num_heads, q_length, k_length] attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1) - - offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0 + if self.config.mp_size > 1 and dist.is_initialized(): + offset = dist.get_rank() * self.num_attention_heads_per_partition + else: + offset = 0 target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype # When using the hybrid engine with BLOOM, input_mask needs to be converted from torch.bool -> torch.int64 diff --git a/deepspeed/ops/transformer/inference/moe_inference.py b/deepspeed/ops/transformer/inference/moe_inference.py index 8766b65e866d..da3981c13714 100644 --- a/deepspeed/ops/transformer/inference/moe_inference.py +++ b/deepspeed/ops/transformer/inference/moe_inference.py @@ -7,16 +7,16 @@ import math import torch from torch.autograd import Function -# accelerator modules will be imported if needed -inference_module = None -specialized_mode = None import torch.nn as nn from .ds_attention import DeepSpeedSelfAttention from .config import DeepSpeedInferenceConfig +from .op_binding import SoftmaxOp, VectorMatMulOp, GELUGemmOp +from .op_binding.bias_residual import BiasResidualOp +from .op_binding.einsum_sec_sm_ecm import EinsumSecSmEcmOp +from .op_binding.layer_norm import LayerNormOp from ....moe.sharded_moe import TopKGate from deepspeed import comm as dist -from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from .op_binding.moe_res_matmul import MoEResMatmulOp class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): @@ -110,16 +110,13 @@ class DeepSpeedMLPFunction(Function): @staticmethod def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group, - async_op): + async_op, gelu_gemm_func, vector_matmul_func): if config.q_int8: - intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2], - (q_groups * (2**merge_count)), config.pre_layer_norm) - output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count)) + intermediate = gelu_gemm_func(input, inter_w, inter_b, config.epsilon, q_scales[2], + (q_groups * (2**merge_count)), config.pre_layer_norm) + output = vector_matmul_func(intermediate, output_w, q_scales[3], q_groups, (merge_count)) else: - mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \ - inference_module.fused_gemm_gelu_fp32 - - output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) + output = gelu_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) if mp_group is not None and dist.get_world_size(group=mp_group) > 1: dist.all_reduce(output, group=mp_group, async_op=async_op) @@ -150,10 +147,13 @@ def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_g self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups self.merge_count = 
int(math.log2(merge_count)) self.mp_group = mp_group + self.gelu_gemm_func = GELUGemmOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) def forward(self, input, async_op=False): return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w, - self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op) + self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op, + self.gelu_gemm_func, self.vector_matmul_func) class DeepSpeedMoEInference(nn.Module): @@ -187,18 +187,7 @@ def __init__(self, self.config = config self.config.layer_id = DeepSpeedMoEInference.layer_id - global inference_module - global specialized_mode - if inference_module is None: - specialized_mode = False - # InferenceSpecializedBuilder is not among DeepSpeed provided builder yet, so we infer by builder name string - builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder") - if builder is not None and builder.is_compatible(): - inference_module = builder.load() - specialized_mode = True - else: - inference_module = InferenceBuilder().load() - self.config.specialized_mode = specialized_mode + assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support" DeepSpeedMoEInference.layer_id += 1 @@ -213,10 +202,8 @@ def __init__(self, self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, mp_group) self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2)) - self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.softmax_fp32 - self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \ - inference_module.vector_matmul_fp32 + self.coef_func = SoftmaxOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) config.mp_size = 1 self.mlp = nn.ModuleList( @@ -234,12 +221,10 @@ def __init__(self, print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__) - self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.bias_residual_fp32 - self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.layer_norm_fp32 - self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.einsum_sec_sm_ecm_fp32 + self.bias_residual_func = BiasResidualOp(self.config) + self.ds_layernorm = LayerNormOp(self.config) + self.einsum_sec_sm_ecm = EinsumSecSmEcmOp(self.config) + self.moe_res_matmul = MoEResMatmulOp(self.config) def res_coef_func(self, inp, async_op): inp = self.vector_matmul_func(inp, self.res_coef, async_op) @@ -346,7 +331,7 @@ def forward(self, dim=0)[dist.get_rank(group=self.expert_mp_group)] if self.config.mlp_type == 'residual': - inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output) + self.moe_res_matmul(res_mlp_out, res_coef_out, output) output = self.bias_residual_func(output, residual_add, torch.empty(1)) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_add.py b/deepspeed/ops/transformer/inference/op_binding/bias_add.py new file mode 100644 index 000000000000..d2ae38f546eb --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_add.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasAddOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_add_func = self.inference_module.bias_add_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_add_func = self.inference_module.bias_add_bf16 + else: + self.bias_add_func = self.inference_module.bias_add_fp32 + except AttributeError: + self.bias_add_func = self.bias_add_fallback + + @classmethod + def bias_add_fallback(cls, input, bias): + return torch.add(input, bias) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_add_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py new file mode 100644 index 000000000000..f0fee0b0d06e --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasGeluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasGeluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_gelu_func = self.inference_module.bias_gelu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_gelu_func = self.inference_module.bias_gelu_bf16 + else: + self.bias_gelu_func = self.inference_module.bias_gelu_fp32 + except AttributeError: + self.bias_gelu_func = self.bias_gelu_fallback + + @classmethod + def bias_gelu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally and using the tanh approximation + return F.gelu(activations.to(torch.float32) + bias.to(torch.float32), approximate='tanh').to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_gelu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_relu.py b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py new file mode 100644 index 000000000000..ccfade1d9524 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasReluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasReluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_relu_func = self.inference_module.bias_relu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_relu_func = self.inference_module.bias_relu_bf16 + else: + self.bias_relu_func = self.inference_module.bias_relu_fp32 + except AttributeError: + self.bias_relu_func = self.bias_relu_fallback + + @classmethod + def bias_relu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally + return F.relu(activations.to(torch.float32) + bias.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_relu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_residual.py b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py new file mode 100644 index 000000000000..ecad50e10ffe --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasResidualOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasResidualOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.bias_residual_func = self.inference_module.bias_residual_fp16 + else: + self.bias_residual_func = self.inference_module.bias_residual_fp32 + except AttributeError: + self.bias_residual_func = self.bias_residual_fallback + + @classmethod + def bias_residual_fallback(cls, output, residual, bias): + raise NotImplementedError("bias residual fallback isn't implemented") + + def forward(self, output, residual, bias): + return self.bias_residual_func(output, residual, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py new file mode 100644 index 000000000000..f34b10f786d1 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class EinsumSecSmEcmOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(EinsumSecSmEcmOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp16 + else: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp32 + except AttributeError: + self.einsum_sec_sm_ecm_func = self.einsum_sec_sm_ecm_fallback + + @classmethod + def einsum_sec_sm_ecm_fallback(cls, Q, W): + raise NotImplementedError("einsum sec sm ecm fallback isn't implemented") + + def forward(self, Q, W): + return self.einsum_sec_sm_ecm_func(Q, W) diff --git a/deepspeed/ops/transformer/inference/op_binding/gated_activation.py b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py new file mode 100644 index 000000000000..d28d818ce4b3 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from deepspeed.utils.types import ActivationFuncType +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class GatedActivationOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(GatedActivationOp, self).__init__(config) + try: + self.gated_activation_func = self.inference_module.gated_activation + except AttributeError: + self.gated_activation_func = self.gated_activation_fallback + + @classmethod + def gated_activation_fallback(cls, activation, bias, activation_func_type): + # Expected behavior is that of casting to float32 internally + # Explicitly using the default GeLU + activation_func = None + activations = activation + bias.reshape(1, 1, -1) + hidden_states, gate = activations.chunk(2, dim=-1) + + if activation_func_type == ActivationFuncType.GATED_SILU: + activation_func = F.silu + elif activation_func_type == ActivationFuncType.GATED_GELU: + activation_func = F.gelu + + return hidden_states * activation_func(gate.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor, activation_func_type: ActivationFuncType): + return self.gated_activation_func(activation, bias, activation_func_type) diff --git a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py index 63323c150752..60bbb4b48bdb 100644 --- a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py @@ -4,6 +4,7 @@ # DeepSpeed Team import torch +import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp import deepspeed @@ -14,7 +15,9 @@ class GELUGemmOp(BaseOp): def __init__(self, config: DeepSpeedInferenceConfig): super(GELUGemmOp, self).__init__(config) try: - if self.config.dtype in [torch.float16, torch.int8]: + if self.config.dtype == torch.int8: + self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_int8 + elif self.config.dtype == torch.float16: if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16: from deepspeed.ops.transformer.inference.triton.ops import fused_gemm_gelu as 
_triton_fused_gemm_gelu self.fused_gemm_gelu = _triton_fused_gemm_gelu # type: ignore @@ -28,7 +31,11 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.fused_gemm_gelu = self.gelu_gemm_fallback def gelu_gemm_fallback(self, input, weight, scale, bias, out, out_scale, dtype, transpose): - raise NotImplementedError + tmp = torch.matmul(input, weight) + tmp = F.gelu(tmp.to(torch.float32) + bias.to(torch.float32), approximate="tanh").to(tmp.dtype) + output = torch.matmul(tmp, out) + + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/layer_norm.py b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py new file mode 100644 index 000000000000..31219a58ac3c --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class LayerNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + super(LayerNormOp, self).__init__(config) + try: + if config is None: + self.layer_norm_func = self.inference_module.layer_norm + elif self.config.dtype in [torch.float16, torch.int8]: + self.layer_norm_func = self.inference_module.layer_norm_fp16 + else: + self.layer_norm_func = self.inference_module.layer_norm_fp32 + except AttributeError: + self.layer_norm_func = self.layer_norm_fallback + + @classmethod + def layer_norm_residual(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f + bias_f + res_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + @classmethod + def layer_norm_residual_store_pre_ln_res(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + res_output = vals_f + bias_f + res_f + norm_output = F.layer_norm(res_output, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + return norm_output, res_output.to(dtype) + + @classmethod + def layer_norm_fallback(cls, vals, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + def forward(self, vals, gamma, beta, epsilon): + return self.layer_norm_func(vals, gamma, beta, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py index 3064c00d1755..97daf8b74bd8 100644 --- a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py @@ -5,12 +5,12 @@ from typing import Optional -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp from deepspeed.utils.types import NormType +from 
.pre_rms_norm import PreRMSNormOp
 
 
 class MLPGemmOp(BaseOp):
@@ -39,23 +39,46 @@ def __init__(self, config: DeepSpeedInferenceConfig):
                 self.mlp_gemm_func = self.mlp_gemm_fallback
             elif self.config.norm_type == NormType.RMSNorm:
                 self.mlp_gemm_func = self.rms_mlp_gemm_fallback
+        self.pre_rms_norm = PreRMSNormOp()
 
     def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps,
                           pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type,
                           transpose):
-        if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose:
-            residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta,
-                                        self.config.epsilon)
-            tmp = torch.matmul(residual_add, weight_interm)
+        if mlp_after_attn:
+            residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, eps)
+            tmp = torch.matmul(residual_add, weight_interm.t() if transpose else weight_interm)
             tmp = F.gelu(tmp + bias)
-            output = torch.matmul(tmp, weight_out)
-            return (output, residual_add)
+            output = torch.matmul(tmp, weight_out.t() if transpose else weight_out)
+
+            return output, residual_add
         else:
+            # TODO: SW-151870 implement mlp_gemm_fallback
            raise NotImplementedError
 
     def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale,
                               dtype, mlp_act_func_type, transpose):
-        raise NotImplementedError
+        inp_norm, residual = self.pre_rms_norm(input, residual, gamma, eps)
+        tmp = torch.matmul(inp_norm.view([-1, inp_norm.size(2)]), weight_interm.t() if transpose else weight_interm)
+        up_proj, gate_proj = tmp.chunk(2, dim=1)
+
+        from deepspeed.utils.types import ActivationFuncType
+        if mlp_act_func_type == ActivationFuncType.GELU:
+            intermediate = F.gelu(gate_proj)
+        elif mlp_act_func_type == ActivationFuncType.ReLU:
+            intermediate = F.relu(gate_proj)
+        elif mlp_act_func_type == ActivationFuncType.GATED_GELU:
+            intermediate = F.gelu(gate_proj)
+        elif mlp_act_func_type == ActivationFuncType.GATED_SILU:
+            intermediate = F.silu(gate_proj)
+        else:
+            raise NotImplementedError(f"rms_mlp_gemm_fallback not implemented for activation type {mlp_act_func_type}")
+
+        intermediate = intermediate * up_proj
+
+        output = torch.matmul(intermediate, weight_out.t() if transpose else weight_out)
+        output = output.view([input.size(0), input.size(1), -1])
+
+        return [output, residual]
 
     def forward(self,
                 input: torch.Tensor,
diff --git a/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py
new file mode 100644
index 000000000000..ef3558c8bc88
--- /dev/null
+++ b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py
@@ -0,0 +1,29 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class MoEResMatmulOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(MoEResMatmulOp, self).__init__(config) + try: + self.moe_res_matmul_func = self.inference_module.moe_res_matmul + except AttributeError: + self.moe_res_matmul_func = self.moe_res_matmul_fallback + + @classmethod + def moe_res_matmul_fallback(cls, residual, coef, output): + coef_t = coef.transpose(1, 2).contiguous() + coef1, coef2 = torch.split(coef_t, split_size_or_sections=coef_t.shape[len(coef_t.shape) - 1] // 2, dim=-1) + return residual * coef1 + output * coef2 + + def forward(self, residual, coef, output): + return self.moe_res_matmul_func(residual, coef, output) diff --git a/deepspeed/ops/transformer/inference/op_binding/pad_transform.py b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py new file mode 100644 index 000000000000..876fefc3bcfb --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class PadTransformOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PadTransformOp, self).__init__(config) + try: + self.pad_transform_func = self.inference_module.pad_transform_fp16 + except AttributeError: + self.pad_transform_func = self.pad_transform_fallback + + @staticmethod + def pad_transform_fallback(query, key, value, heads, do_flash_attn): + raise NotImplementedError("pad_transform fallback is not implemented.") + + def forward(self, query, key, value, heads, do_flash_attn): + return self.pad_transform_func(query, key, value, heads, do_flash_attn) diff --git a/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py new file mode 100644 index 000000000000..7969d20f0527 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +from .rms_norm import RMSNormOp + + +class PreRMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PreRMSNormOp, self).__init__(config) + try: + self.pre_rms_norm_func = self.inference_module.pre_rms_norm + except AttributeError: + self.pre_rms_norm_func = self.pre_rms_norm_fallback + + @staticmethod + def pre_rms_norm_fallback(vals, residual, gamma, epsilon): + residual = vals.to(torch.float32) + residual.to(torch.float32) + vals = residual + + return RMSNormOp.rms_norm_fallback(vals, gamma, epsilon), residual.to(gamma.dtype) + + def forward(self, vals, residual, gamma, epsilon): + return self.pre_rms_norm_func(vals, residual, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py index 250bf9864e1e..9ff5366fae5d 100644 --- a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .rms_norm import RMSNormOp import deepspeed from deepspeed.utils.types import NormType @@ -56,19 +56,23 @@ def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16): matmul(A, B) Fp16Matmul._update_autotune_table() - def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) - tmp = torch.matmul(inp_norm, weight) - if add_bias: - tmp += bias - output = [tmp, inp_norm] - return output - else: - raise NotImplementedError + @staticmethod + def qkv_gemm_fallback(input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + if add_bias: + tmp += bias + output = [tmp, inp_norm] + + return output + + @staticmethod + def rms_qkv_gemm_fallback(input, weight, q_scale, gamma, eps, q_int8, transpose): + inp_norm = RMSNormOp.rms_norm_fallback(input, gamma, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + output = [tmp, inp_norm] - def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose): - raise NotImplementedError + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/residual_add.py b/deepspeed/ops/transformer/inference/op_binding/residual_add.py index 6f9b35cbc05d..93b229c5d1ac 100644 --- a/deepspeed/ops/transformer/inference/op_binding/residual_add.py +++ b/deepspeed/ops/transformer/inference/op_binding/residual_add.py @@ -3,9 +3,10 @@ # DeepSpeed Team -import os import torch from typing import Optional + +from .vector_add import VectorAddOp from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -22,11 +23,32 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.residual_add_func = self.inference_module.residual_add_bias_fp32 except AttributeError: - 
self.residual_add_func = None - try: - self._vector_add = self.inference_module._vector_add - except AttributeError: - self._vector_add = None + self.residual_add_func = self.residual_add_fallback + self.vector_add = VectorAddOp() + + @staticmethod + def res_add_bias(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias, mp_size): + hidden_state += attn_output + (residual + final_bias) / mp_size + if add_attn_bias: + hidden_state += attn_bias / mp_size + + return hidden_state + + @staticmethod + def residual_add_fallback(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + if mlp_after_attn: + if pre_layer_norm: + tmp = (residual.float() + attention_output.float() + attention_bias.float() + + final_bias.float()) / mp_size + hidden_state.float() + else: + tmp = residual.float() + hidden_state.float() + final_bias.float() + else: + tmp = ResidualAddOp.res_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + add_bias, mp_size) + residual.copy_(tmp.to(hidden_state.dtype)) + + return residual def forward(self, hidden_state: torch.Tensor, @@ -37,28 +59,15 @@ def forward(self, attention_bias: Optional[torch.Tensor] = None, final_bias: Optional[torch.Tensor] = None): - if self.residual_add_func is not None: - if final_bias is None: - residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size) - else: - if not self.config.pre_layer_norm and residual_add is not None: - # only use residual add if its set and we are not pre layer norm - residual = residual_add - - self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, - self.config.mp_size, self.config.mlp_after_attn, add_bias, - self.config.pre_layer_norm) + if final_bias is None and attention_bias is None: + residual = self.vector_add(residual + attention_output, hidden_state, 1.0 / self.config.mp_size) else: - # fallback - if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn: - if self.config.pre_layer_norm: - tmp = (residual.float() + attention_output.float() + attention_bias.float() + - final_bias.float()) / self.config.mp_size + hidden_state.float() - else: - tmp = residual.float() + hidden_state.float() + final_bias.float() + if not self.config.pre_layer_norm and residual_add is not None: + # only use residual add if its set and we are not pre layer norm + residual = residual_add + + self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, + self.config.mp_size, self.config.mlp_after_attn, add_bias, + self.config.pre_layer_norm) - input_dtype = hidden_state.dtype - residual = tmp.to(input_dtype) - else: - raise NotImplementedError return residual diff --git a/deepspeed/ops/transformer/inference/op_binding/rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py new file mode 100644 index 000000000000..128883ce5d43 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class RMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(RMSNormOp, self).__init__(config) + try: + self.rms_norm_func = self.inference_module.rms_norm + except AttributeError: + self.rms_norm_func = self.rms_norm_fallback + + @staticmethod + def rms_norm_fallback(vals, gamma, epsilon): + variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) + vals = vals * torch.rsqrt(variance + epsilon) + + if gamma.dtype in [torch.float16, torch.bfloat16]: + vals = vals.to(gamma.dtype) + + return gamma * vals + + def forward(self, vals, gamma, epsilon): + return self.rms_norm_func(vals, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax.py b/deepspeed/ops/transformer/inference/op_binding/softmax.py index bc309d94df14..b408883d5cfd 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxOp(BaseOp): @@ -25,24 +25,45 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_func = self.softmax_fallback - def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, - async_op, layer_scale, head_offset, mp_size): - if os.environ.get('DS_KI_FALLBACK') == 'True': - alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition] - input_dtype = attn_scores.dtype - if (triangular): - tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) - attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min) - if alibi is not None: - attn_scores += alibi - if attn_mask is not None: - # expand atten_mask from two dim into 4 dim, insert two dims in the middle + @staticmethod + def softmax_fallback(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, async_op, + layer_scale, head_offset, mp_size): + scores_len = len(attn_scores.size()) + heads = 1 + if scores_len > 1: + heads = attn_scores.size()[1] + num_attention_heads_per_partition = heads // mp_size + + if alibi is not None: + if len(alibi.shape) == 1: + alibi = None + else: + alibi = alibi[head_offset:head_offset + num_attention_heads_per_partition] + if attn_mask is not None and len(attn_mask.shape) == 1: + attn_mask = None + input_dtype = attn_scores.dtype + attn_scores *= layer_scale + + if alibi is not None: + attn_scores += alibi + if attn_mask is not None: + # expand atten_mask from two dim into 4 dim, insert two dims in the middle + if len(attn_mask.shape) == 2: + # The above if statement was added because the mask was already 4D so this + # expansion should be avoided as it expands to 6D and crashes later (in bloom + # HE KI FB) attn_mask = attn_mask[:, None, None, :] - attn_scores += attn_mask - output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) - return output - else: - raise NotImplementedError + attn_scores += attn_mask + if triangular: + if attn_scores.shape[2] == 1: # query using kv cache + token_idx = 
InferenceContext.Instance().current_tokens() + tri = torch.arange(attn_scores.shape[2], device=attn_scores.device).ge(token_idx) + else: + tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) + attn_scores = torch.masked_fill(attn_scores, tri, float('-inf')) + output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) + + return output def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool, recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float, diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py index 0dc4e08a3633..b9b521e4cd53 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py @@ -7,6 +7,8 @@ from deepspeed import comm as dist from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .softmax import SoftmaxOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxContextOp(BaseOp): @@ -23,13 +25,109 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_context_func = self.softmax_context_fallback + @staticmethod + def transform4d_0213(x, seq_length): + assert x.dim() == 3, F"{x.dim()=} is not supported" + batch_size, num_heads, seq_length_head_dim = x.shape + head_dim = seq_length_head_dim // seq_length + x = x.view(batch_size, num_heads, seq_length, head_dim) + x = x.permute(0, 2, 1, 3) + + return x + + @staticmethod + def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep <= 1 or num_key_value_heads == 1: + return hidden_states + + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + @staticmethod + def bias_add_transform_0213(input, bias, num_heads, trans_count, perform_bias=False): + assert trans_count == 1 or trans_count == 3, F"{trans_count=} is not supported" + assert input.dim() == 3, F"{input.dim()=} is not supported" + input_biased = torch.add(input, bias) if perform_bias else input + batch_size, seq_length, value_size = input_biased.shape + hid_dim = value_size // trans_count + head_dim = hid_dim // num_heads + + if trans_count == 1: + query_layer = input.view(batch_size, seq_length, num_heads, head_dim) + query_layer = query_layer.permute(0, 2, 1, 3) + key_layer = torch.zeros_like(query_layer) + value_layer = torch.zeros_like(query_layer) + return query_layer, key_layer, value_layer + + qkv_layers = input.view(batch_size, seq_length, 3, num_heads, head_dim) + query_layer, key_layer, value_layer = qkv_layers[..., 0, :, :], qkv_layers[..., 1, :, :], qkv_layers[..., + 2, :, :] + query_layer = query_layer.transpose(1, 2) + key_layer = key_layer.transpose(1, 2) + value_layer = value_layer.transpose(1, 2) + + return query_layer, key_layer, value_layer + def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, norm_factor, triangular_masking, local_attention, window_size, no_masking, - layer_id, num_layers, alibi, rope_theta): - raise NotImplementedError + layer_id, num_layers, alibi, rope_theta, is_prompt, token_idx, position_ids): + bat_0213_query, 
bat_0213_key, bat_0213_value = self.bias_add_transform_0213(
+            query_key_value, None, heads, 3, False)
+
+        if rotary_dim > 0 and rotate_half:
+            from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
+
+            rotary = InferenceContext.Instance().get_rotary(rotary_dim, rope_theta, bat_0213_value.device)
+            cos, sin = rotary(bat_0213_value, InferenceContext.Instance().get_max_tokens_num())
+            # TODO: SW-170999 Optimize RoPE implementation.
+            bat_0213_query, bat_0213_key = apply_rotary_pos_emb(bat_0213_query, bat_0213_key, cos, sin, position_ids)
+
+        bat_0213_key, bat_0213_value = InferenceContext.Instance().update_cache(layer_id, token_idx, is_prompt,
+                                                                                bat_0213_key, bat_0213_value)
+
+        bat_0213_key = self.repeat_kv(bat_0213_key, num_kv)
+        bat_0213_value = self.repeat_kv(bat_0213_value, num_kv)
+
+        bsz = query_key_value.shape[0]
+        head_dim = query_key_value.shape[2] // (heads * 3)
+
+        bmm_output = torch.bmm(bat_0213_query.reshape(bsz * heads, bat_0213_query.shape[2], head_dim),
+                               bat_0213_key.reshape(bsz * heads, bat_0213_key.shape[2], head_dim).transpose(1, 2))
+
+        layer_scale = 1.0
+        if alibi is not None and len(alibi.shape) > 1:
+            layer_scale = float(max(1, layer_id))
+
+        alpha = norm_factor * norm_factor / layer_scale
+        bmm_output *= alpha
+        bmm_output_reshape = bmm_output.reshape(bsz, heads, bmm_output.shape[1], bmm_output.shape[2])
+
+        recompute = is_prompt
+        if attn_mask is not None and len(attn_mask.shape) > 1 and attn_mask.shape[-1] < bmm_output_reshape.shape[3]:
+            attn_mask = torch.nn.functional.pad(attn_mask, (0, bmm_output_reshape.shape[3] - attn_mask.shape[-1]),
+                                                value=torch.finfo(attn_mask.dtype).min)
+        softmax_output = SoftmaxOp.softmax_fallback(bmm_output_reshape, attn_mask, alibi, triangular_masking,
+                                                    recompute, local_attention, window_size, None, layer_scale, 0, 1)
+
+        output = torch.bmm(softmax_output.reshape(bsz * heads, softmax_output.shape[2], softmax_output.shape[3]),
+                           bat_0213_value.reshape(bsz * heads, bat_0213_value.shape[2], head_dim))
+
+        output = output.reshape(bsz, heads, output.shape[1], head_dim)
+        output = output.reshape(bsz, heads, output.shape[2] * head_dim)
+        input_seq_len = query_key_value.shape[1]
+        t4d_0123_output = self.transform4d_0213(output, input_seq_len)
+        t4d_0123_output = t4d_0123_output.reshape(bsz, t4d_0123_output.shape[1], heads * head_dim)
+
+        if layer_id == num_layers - 1:
+            InferenceContext.Instance().advance_tokens()
+
+        return t4d_0123_output, bat_0213_key, bat_0213_value
 
     def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, num_kv: int,
-                norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor):
+                norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor,
+                is_prompt: bool, token_idx: torch.Tensor, position_ids: torch.Tensor):
 
         if alibi is not None:
             batch_heads = query_key_value.shape[0] * heads
@@ -42,6 +140,6 @@ def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads:
                                            self.config.rotate_every_two, heads, num_kv, norm_factor,
                                            self.config.triangular_masking, self.config.local_attention,
                                            self.config.window_size, no_masking, layer_id, num_layers, alibi,
-                                           self.config.rope_theta)
+                                           self.config.rope_theta, is_prompt, token_idx, position_ids)
 
         return output
diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_add.py b/deepspeed/ops/transformer/inference/op_binding/vector_add.py
new file mode 100644
index 000000000000..015340a1084b
--- /dev/null
+++
b/deepspeed/ops/transformer/inference/op_binding/vector_add.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class VectorAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(VectorAddOp, self).__init__(config) + try: + self.vector_add_func = self.inference_module._vector_add + except AttributeError: + self.vector_add_func = self.vector_add_fallback + + @classmethod + def vector_add_fallback(cls, a, b, gamma): + """Based on csrc/transformer/inference/csrc/pt_binding.cpp code of _vector_add""" + dtype = a.dtype + return (gamma * a.float() + b.float()).to(dtype) + + def forward(self, a, b, gamma): + return self.vector_add_func(a, b, gamma) diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py index 011be859634d..cabab8d8c4ab 100644 --- a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py +++ b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py @@ -3,7 +3,6 @@ # DeepSpeed Team -import os import torch from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -25,7 +24,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.vector_matmul_func = self.inference_module.vector_matmul_fp16 elif self.config.dtype == torch.int8: - self.vector_matmul_func = self.inference_module.vector_matmul_fp16 + self.vector_matmul_func = self.inference_module.vector_matmul_int8 elif self.config.dtype == torch.bfloat16: self.vector_matmul_func = self.inference_module.vector_matmul_bf16 else: @@ -34,10 +33,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.vector_matmul_func = self.vector_matmul_fallback def vector_matmul_fallback(self, input, weight, async_op, q_scale, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - return torch.matmul(input, weight) - else: - raise NotImplementedError + return torch.matmul(input, weight.t() if transpose else weight) def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False): q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) diff --git a/deepspeed/ops/transformer/inference/op_binding/workspace.py b/deepspeed/ops/transformer/inference/op_binding/workspace.py new file mode 100644 index 000000000000..b06b1ca0bd32 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/workspace.py @@ -0,0 +1,204 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + +minus_inf = -10000.0 +key_idx = 0 +value_idx = 1 + + +class InferenceContext: + + __instance = None + + def __init__(self): + self.kv_cache = None + self.kv_cache_elem_dtype = None + self.num_tokens = 1 + self.kv_cache_num_layers = None + self.kv_cache_size = None + self.max_out_tokens = None + self.rotary = None + self.allocate_called = False + self.static_shapes = True + + @classmethod + def Instance(cls): + if InferenceContext.__instance is None: + InferenceContext.__instance = InferenceContext() + return InferenceContext.__instance + + def gen_workspace(self, num_layers, num_heads, batch_size, prompt_len, hidden_dim, mp_size, external_cache, + elem_dtype, rank, max_out_tokens, min_out_tokens): + self.allocate_called = True + self.kv_cache = None + if not external_cache: + self.kv_cache_num_layers = num_layers + self.max_out_tokens = max_out_tokens + head_size = hidden_dim // num_heads + self.kv_cache_size = torch.Size([batch_size, (num_heads // mp_size), max_out_tokens, head_size]) + self.kv_cache_elem_dtype = elem_dtype + self.num_tokens = 0 + self.static_shapes = True + return True + + def retake_workspace(self): + return True + + def _retake_workspace(self): + assert self.allocate_called, "retake workspace called before allocate workspace" + + import deepspeed.accelerator as accelerator + if self.kv_cache is None: + self.kv_cache = [] + for layer in range(self.kv_cache_num_layers): + self.kv_cache.append((torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()), + torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()))) + + return True + + def update_cache(self, layer_id, token_idx, is_prompt, bat_0213_key, bat_0213_value): + has_workspace = self._retake_workspace() + assert has_workspace, "Could not allocate workspace" + + # Update current token + if is_prompt: + self.static_shapes = True + if token_idx is None: + self.static_shapes = False + InferenceContext.Instance().reset_tokens(bat_0213_key.shape[2]) + else: + InferenceContext.Instance().reset_tokens(token_idx) + + if token_idx is None: + token_idx = InferenceContext.Instance().current_tokens() + + bsz = bat_0213_key.shape[0] + + # Update cache content + if is_prompt: + cache_max_seq = self.kv_cache_size[2] + cache_max_head_dim = self.kv_cache_size[3] + seq = bat_0213_key.shape[2] + + mask = torch.arange(cache_max_seq, device=bat_0213_key.device) + mask = mask.ge(token_idx) + mask = mask.unsqueeze(-1) + mask = mask.expand([cache_max_seq, cache_max_head_dim]) + + self.kv_cache[layer_id][key_idx][:bsz, :, :seq, :].copy_(bat_0213_key) + self.kv_cache[layer_id][key_idx][:bsz, :].masked_fill_(mask, 0) + self.kv_cache[layer_id][value_idx][:bsz, :, :seq, :].copy_(bat_0213_value) + self.kv_cache[layer_id][value_idx][:bsz, :].masked_fill_(mask, 0) + else: + if self.static_shapes: + assert type(token_idx) == torch.Tensor, "token_idx is expected to be torch.Tensor" + self.kv_cache[layer_id][key_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_key) + self.kv_cache[layer_id][value_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_value) + else: + assert type(token_idx) == int, "token_idx is expected to be int" + self.kv_cache[layer_id][key_idx][:bsz, :, token_idx - 1:token_idx, :] = bat_0213_key + self.kv_cache[layer_id][value_idx][:bsz, :, token_idx - 1:token_idx, 
:] = bat_0213_value + + bat_0213_key = self.kv_cache[layer_id][key_idx][:bsz] + bat_0213_value = self.kv_cache[layer_id][value_idx][:bsz] + + if not self.static_shapes: + bat_0213_key = bat_0213_key[:, :, :token_idx, :] + bat_0213_value = bat_0213_value[:, :, :token_idx, :] + + return bat_0213_key, bat_0213_value + + def release_workspace(self): + self.kv_cache = None + self.rotary = None + + def reset_tokens(self, initial_tokens=1): + self.num_tokens = initial_tokens + + def current_tokens(self): + return self.num_tokens + + def advance_tokens(self): + self.num_tokens = self.num_tokens + 1 + + def get_kv_cache(self): + return self.kv_cache + + def get_rotary(self, rotary_dim, rope_theta, device=None): + if self.rotary is None: + from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding + + self.rotary = LlamaRotaryEmbedding(rotary_dim, base=rope_theta, device=device) + + return self.rotary + + def get_max_tokens_num(self): + return self.max_out_tokens + + +class WorkspaceOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(WorkspaceOp, self).__init__(config) + + self.inference_context = InferenceContext.Instance() + try: + if config.dtype == torch.float32: + self.allocate_workspace = self.inference_module.allocate_workspace_fp32 + elif config.dtype == torch.bfloat16: + self.allocate_workspace = self.inference_module.allocate_workspace_bf16 + else: + self.allocate_workspace = self.inference_module.allocate_workspace_fp16 + self.release_workspace = self.inference_module.release_workspace + self.retake_workspace = self.inference_module.retake_workspace + self.reset_cache = self.inference_module.reset_cache + except AttributeError: + if config.dtype == torch.float32: + self.allocate_workspace = self.allocate_workspace_fp32_fallback + elif config.dtype == torch.bfloat16: + self.allocate_workspace = self.allocate_workspace_bf16_fallback + else: + self.allocate_workspace = self.allocate_workspace_fp16_fallback + self.release_workspace = self.release_workspace_fallback + self.retake_workspace = self.retake_workspace_fallback + self.reset_cache = self.reset_cache_fallback + + def allocate_workspace_fp32_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.float, rank, max_out_tokens, + min_out_tokens) + + def allocate_workspace_bf16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.bfloat16, rank, max_out_tokens, + min_out_tokens) + + def allocate_workspace_fp16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.half, rank, max_out_tokens, + min_out_tokens) + + def reset_cache_fallback(self): + return self.inference_context.reset_tokens() + + def release_workspace_fallback(self): + return self.inference_context.release_workspace() + + def retake_workspace_fallback(self): + return 
self.inference_context.retake_workspace() diff --git a/deepspeed/ops/transformer/inference/triton/attention.py b/deepspeed/ops/transformer/inference/triton/attention.py index c05370ec74e5..6845d91b06be 100644 --- a/deepspeed/ops/transformer/inference/triton/attention.py +++ b/deepspeed/ops/transformer/inference/triton/attention.py @@ -125,7 +125,7 @@ def _triton_autotune(min_seqlen, context_4d_matmul(output, qkv, head_size) Fp16Matmul._update_autotune_table() - def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list): qkv_out = qkv_out[0] @@ -143,7 +143,10 @@ def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi): no_masking=no_masking, layer_id=self.config.layer_id, num_layers=TritonSelfAttention.num_layers, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) context_layer, key_layer, value_layer = attn_key_value return context_layer, key_layer, value_layer @@ -161,7 +164,8 @@ def forward( norm_w=None, norm_b=None, alibi=None, - use_triton_attention=True): + use_triton_attention=True, + **kwargs): if not self.config.pre_layer_norm: qkv_out = self.linear_func(input=input, @@ -192,10 +196,16 @@ def forward( triangular=self.triangular_masking) key_layer, value_layer = qkv[:, :, self.hidden_size:2 * self.hidden_size], qkv[:, :, 2 * self.hidden_size:] else: + is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1) + token_idx = kwargs.get("token_idx", None) + position_ids = kwargs.get("position_ids", None) context_layer, key_layer, value_layer = self.ds_compute_attention(qkv_out=qkv_out, input_mask=input_mask, layer_past=layer_past, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) inp_norm = qkv_out[-1] diff --git a/deepspeed/ops/transformer/inference/triton/ops.py b/deepspeed/ops/transformer/inference/triton/ops.py index dd87d08d4d2c..dbed45313780 100644 --- a/deepspeed/ops/transformer/inference/triton/ops.py +++ b/deepspeed/ops/transformer/inference/triton/ops.py @@ -3,12 +3,10 @@ # DeepSpeed Team -import deepspeed -from deepspeed.ops.op_builder import InferenceBuilder import deepspeed.ops.transformer.inference.triton.matmul_ext as matmul_ext +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from deepspeed.ops.transformer.inference.triton.layer_norm import layer_norm, layer_norm_residual - -inference_module = None +from deepspeed.utils.types import ActivationFuncType def vector_matmul_func(input, weight, async_op, q_scale, q_int8, transposed_mode): @@ -76,15 +74,12 @@ def mlp_gemm_func(input, if use_triton_ln: mlp_input = layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) else: - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - mlp_input = inference_module._layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) + mlp_input = LayerNormOp.layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) # activation - if deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == deepspeed.utils.types.ActivationFuncType.GELU: + if ActivationFuncType(mlp_act_func_type) == ActivationFuncType.GELU: activation = "gelu" - elif deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == 
deepspeed.utils.types.ActivationFuncType.ReLU: + elif ActivationFuncType(mlp_act_func_type) == ActivationFuncType.ReLU: activation = "relu" else: activation = "" @@ -121,10 +116,7 @@ def qkv_gemm_func( if use_triton_ln: qkv_input = layer_norm(input, gamma, beta, epsilon) else: - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - qkv_input = inference_module.layer_norm(input, gamma, beta, epsilon) + qkv_input = LayerNormOp()(input, gamma, beta, epsilon) qkv_out = matmul_ext.matmul(qkv_input, weight, bias=(bias if add_bias else None), activation="", use_triton=True) diff --git a/deepspeed/runtime/activation_checkpointing/checkpointing.py b/deepspeed/runtime/activation_checkpointing/checkpointing.py index 529931ca0df1..066ad25de456 100644 --- a/deepspeed/runtime/activation_checkpointing/checkpointing.py +++ b/deepspeed/runtime/activation_checkpointing/checkpointing.py @@ -30,14 +30,17 @@ from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.accelerator import get_accelerator from deepspeed.runtime import compiler +import os # DeepSpeed Checkpointing Enabled or Disabled deepspeed_checkpointing_enabled = False # MP parameters mpu = None -mp_rank = None -mp_size = None + +#set default values +mp_rank = 0 +mp_size = 1 mp_group = None # Model Parameters @@ -61,8 +64,6 @@ # Default name for the model parallel rng tracker. _MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng' -transport_stream = None -cuda_device = None def detach_variable(inputs, device=None): @@ -518,35 +519,10 @@ def save_args_for_backward(*all_args): global mp_rank, mp_size, mp_group global contiguous_data_buffers, contiguous_size_buffers global data_offsets, size_offsets - if mp_rank is None: - if mpu is not None: - if hasattr(mpu, 'get_tensor_model_parallel_rank'): - mp_rank = mpu.get_tensor_model_parallel_rank() - mp_size = mpu.get_tensor_model_parallel_world_size() - mp_group = mpu.get_tensor_model_parallel_group() - else: - mp_rank = mpu.get_model_parallel_rank() - mp_size = mpu.get_model_parallel_world_size() - mp_group = mpu.get_model_parallel_group() - else: - mp_rank = 0 - mp_size = 1 - mp_group = None - - global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - - if cuda_device is None: - see_memory_usage("First Forward Beginning", force=False) - if dist.get_rank() == 0: - logger.info(f"Activation Checkpointing Information") - logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") - logger.info( - f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") - logger.info(f"----Synchronization {SYNCHRONIZE}") - logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + global PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - cuda_device = get_accelerator().current_device_name() - transport_stream = get_accelerator().Stream(device=cuda_device) + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) if PARTITION_ACTIVATIONS: inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING) @@ -607,6 +583,9 @@ def backward(ctx, *grads): # removing pointers to the contiguous buffer memory # so that they can be garbage collected once the checkpoints # have been used + if grads[0].device.type == 'hpu': + import habana_frameworks.torch as htorch + htorch.core.mark_step() if SYNCHRONIZE: 
get_accelerator().synchronize() if PROFILE_TIME: @@ -631,8 +610,9 @@ def backward(ctx, *grads): raise RuntimeError("Checkpointing is not compatible with .grad(), " "please use .backward() if possible") - global cuda_device, transport_stream, PARTITION_ACTIVATIONS - + global PARTITION_ACTIVATIONS + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) # Rebuild deepspeed_saved_tensors for t in ctx.deepspeed_saved_tensors: if t is not None and hasattr(t, 'saved_data') and t.saved_data is not None: @@ -764,35 +744,10 @@ def save_args_for_backward(*all_args): global mp_rank, mp_size, mp_group global contiguous_data_buffers, contiguous_size_buffers global data_offsets, size_offsets - if mp_rank is None: - if mpu is not None: - if hasattr(mpu, 'get_tensor_model_parallel_rank'): - mp_rank = mpu.get_tensor_model_parallel_rank() - mp_size = mpu.get_tensor_model_parallel_world_size() - mp_group = mpu.get_tensor_model_parallel_group() - else: - mp_rank = mpu.get_model_parallel_rank() - mp_size = mpu.get_model_parallel_world_size() - mp_group = mpu.get_model_parallel_group() - else: - mp_rank = 0 - mp_size = 1 - mp_group = None - - global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - - if cuda_device is None: - see_memory_usage("First Forward Beginning", force=False) - if dist.get_rank() == 0: - logger.info(f"Activation Checkpointing Information") - logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") - logger.info( - f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") - logger.info(f"----Synchronization {SYNCHRONIZE}") - logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + global PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset - cuda_device = get_accelerator().current_device_name() - transport_stream = get_accelerator().Stream(device=cuda_device) + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) if PARTITION_ACTIVATIONS: inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING) @@ -899,7 +854,9 @@ def replay_unpack(none_value): raise RuntimeError("Checkpointing is not compatible with .grad(), " "please use .backward() if possible") - global cuda_device, transport_stream, PARTITION_ACTIVATIONS + global PARTITION_ACTIVATIONS + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) # gather inputs which is partitioned or checkpointed before first forward if PARTITION_ACTIVATIONS: @@ -988,7 +945,23 @@ def after_backward_hook(_nonuse_grads): return tuple(all_outputs) -@compiler.disable # WA from Pytorch repo for compile + zero 3 accuracy issue +#TODO[SW-187093]: remove once WA in original pytorch code is resolved +disable_compile_bypass = os.environ.get('HL_DS_DISABLE_CHECKPOINTING_COMPILE_BYPASS', None) + + +def conditional_compile_disable(decorator_func): + + def conditional_decorator(function): + global disable_compile_bypass + if disable_compile_bypass: + return function # don't use compiler.disable decorator + return decorator_func(function) # use compiler.disable decorator + + return conditional_decorator + + +#TODO[SW-187093]: remove once WA in original pytorch code is resolved +@conditional_compile_disable(compiler.disable) # WA from Pytorch repo for compile + zero 3 accuracy 
issue def checkpoint(function, *args): """Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint. """ @@ -1151,6 +1124,27 @@ def configure( if CONTIGUOUS_CHECKPOINTING: assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing" + global mp_rank, mp_size, mp_group + + if mpu is not None: + if hasattr(mpu, 'get_tensor_model_parallel_rank'): + mp_rank = mpu.get_tensor_model_parallel_rank() + mp_size = mpu.get_tensor_model_parallel_world_size() + mp_group = mpu.get_tensor_model_parallel_group() + else: + mp_rank = mpu.get_model_parallel_rank() + mp_size = mpu.get_model_parallel_world_size() + mp_group = mpu.get_model_parallel_group() + + #print configuration once + see_memory_usage("After configuration", force=False) + if dist.get_rank() == 0: + logger.info(f"Activation Checkpointing Information") + logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") + logger.info(f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") + logger.info(f"----Synchronization {SYNCHRONIZE}") + logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + def is_configured(): """True if deepspeed activation checkpointing has been configured diff --git a/deepspeed/runtime/bf16_optimizer.py b/deepspeed/runtime/bf16_optimizer.py index 1f3365b20f4e..3f37d8e974d6 100644 --- a/deepspeed/runtime/bf16_optimizer.py +++ b/deepspeed/runtime/bf16_optimizer.py @@ -302,8 +302,18 @@ def step(self, closure=None): mpu=self.mpu, use_graph=self.graph_harvesting) + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + # In case of grad acc dtype different than FP32, need to cast to high precision. 
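+                # (e.g. with bf16 gradient accumulation the flat bf16 grad partition is cast to an
+                #  fp32 copy here so the wrapped optimizer sees fp32 gradients; these temporaries
+                #  are released again right after self.optimizer.step() below)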
+ param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + self.optimizer.step() + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + # We need to link optimizer state after the first step() call self._lazy_init_hp_params_optimizer_state() @@ -526,7 +536,8 @@ def param_groups(self): def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): assert self.immediate_grad_update - self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=True) + # TODO: SW-195119 remove WA to clear lp grads after updating hp grads + self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=False) def create_grad_acc_hooks(self): self.grad_accs = [] diff --git a/deepspeed/runtime/config.py b/deepspeed/runtime/config.py index b49b4a8b6086..89d19da3ec9e 100755 --- a/deepspeed/runtime/config.py +++ b/deepspeed/runtime/config.py @@ -170,6 +170,26 @@ def get_bfloat16_enabled(param_dict): return False +def get_fp8_optimizer_enabled(param_dict): + if FP8_OPTIMIZER in param_dict.keys(): + return get_scalar_param(param_dict[FP8_OPTIMIZER], FP8_OPTIMIZER_ENABLED, FP8_OPTIMIZER_ENABLED_DEFAULT) + return FP8_OPTIMIZER_ENABLED_DEFAULT + + +def get_fp8_optimizer_master_weights_dtype(param_dict): + if FP8_OPTIMIZER in param_dict.keys(): + val = get_scalar_param(param_dict[FP8_OPTIMIZER], FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE, + FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE_DEFAULT) + if val == "fp32": + return torch.float32 + elif val == "fp16": + return torch.float16 + elif val == "bf16": + return torch.bfloat16 + raise ValueError(f"Invalid master_weights_dtype. Supported data types: ['fp16', 'bf16', 'fp32']. Got: {val}") + return torch.float32 + + def get_bfloat16_immediate_grad_update(param_dict): for key in [BFLOAT16, BFLOAT16_OLD]: if key in param_dict.keys(): @@ -830,6 +850,8 @@ def _initialize_params(self, param_dict): self.bfloat16_immediate_grad_update = get_bfloat16_immediate_grad_update(param_dict) assert not (self.fp16_enabled and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled' + self.fp8_optimizer_enabled = get_fp8_optimizer_enabled(param_dict) + self.fp8_optimizer_master_weights_dtype = get_fp8_optimizer_master_weights_dtype(param_dict) self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict) self.amp_enabled = get_amp_enabled(param_dict) self.amp_params = get_amp_params(param_dict) diff --git a/deepspeed/runtime/constants.py b/deepspeed/runtime/constants.py index 679230ca7d4c..e9fa9f131c1e 100755 --- a/deepspeed/runtime/constants.py +++ b/deepspeed/runtime/constants.py @@ -130,6 +130,25 @@ BFLOAT16_IMMEDIATE_GRAD_UPDATE = "immediate_grad_update" BFLOAT16_IMMEDIATE_GRAD_UPDATE_DEFAULT = False +######################################### +# FP8 optimizer support +######################################### +# By default, this feature is not enabled. 
+# Users can configure in ds_config.json as below example: +FP8_FORMAT = ''' +FP8 parameters should be of the format: +"fp8_optimizer": { + "enabled": true +} +''' +FP8_OPTIMIZER = "fp8_optimizer" + +FP8_OPTIMIZER_ENABLED = "enabled" +FP8_OPTIMIZER_ENABLED_DEFAULT = False + +FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE = "master_weights_dtype" +FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE_DEFAULT = "fp32" + ######################################### # FP16 support ######################################### diff --git a/deepspeed/runtime/engine.py b/deepspeed/runtime/engine.py index 4c418fbc532e..c07baf452bb1 100644 --- a/deepspeed/runtime/engine.py +++ b/deepspeed/runtime/engine.py @@ -34,6 +34,7 @@ from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer from deepspeed.runtime.bf16_optimizer import BF16_Optimizer +from deepspeed.runtime.fp8_optimizer import FP8_Optimizer from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \ ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \ @@ -93,12 +94,14 @@ from .compiler import is_compile_supported from ..ops.adam import FusedAdam from ..moe.sharded_moe import TopKGate, MOELayer +from ..moe.capacity_bins import optimize_bins from ..moe.layer import MoE from ..moe.utils import is_moe_param, configure_moe_param_groups from ..git_version_info import version from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler from deepspeed.utils.logging import print_json_dist, print_configuration +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.accelerator import get_accelerator @@ -222,6 +225,7 @@ def __init__(self, self.num_experts = [] self.gate_modules = [] self.moe_layers = [] + self.has_sequence_parallel_params = False self._step_applied = False self._global_grad_norm = None self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend. 
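For reference, a minimal sketch of how the new "fp8_optimizer" section of ds_config.json maps onto the accessors added in config.py above; the dictionary and helper names below are illustrative only, while the key names and defaults come from the FP8_OPTIMIZER* constants in constants.py:

import torch

# Illustrative ds_config fragment (not taken from a real run);
# "master_weights_dtype" is optional and defaults to "fp32".
ds_config = {
    "fp8_optimizer": {
        "enabled": True,
        "master_weights_dtype": "bf16",  # one of "fp16", "bf16", "fp32"
    }
}

_MASTER_DTYPES = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}


def fp8_optimizer_enabled(param_dict):
    # mirrors get_fp8_optimizer_enabled(): the feature is off unless explicitly enabled
    return param_dict.get("fp8_optimizer", {}).get("enabled", False)


def fp8_master_weights_dtype(param_dict):
    # mirrors get_fp8_optimizer_master_weights_dtype(): defaults to fp32, rejects unknown values
    val = param_dict.get("fp8_optimizer", {}).get("master_weights_dtype", "fp32")
    if val not in _MASTER_DTYPES:
        raise ValueError(f"Invalid master_weights_dtype. Supported data types: ['fp16', 'bf16', 'fp32']. Got: {val}")
    return _MASTER_DTYPES[val]


assert fp8_optimizer_enabled(ds_config)
assert fp8_master_weights_dtype(ds_config) is torch.bfloat16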
@@ -249,6 +253,7 @@ def __init__(self, dist.configure(self._config) + self.fp8_optimizer = self._config.fp8_optimizer_enabled self.monitor = MonitorMaster(self._config.monitor_config) see_memory_usage( @@ -312,6 +317,14 @@ def __init__(self, elif self.bfloat16_enabled(): self.optimizer = self._configure_bf16_optimizer(optimizer=None) + #Sequence parallel related initialization + for param in self.module.parameters(): + if getattr(param, 'sequence_parallel', False): + self.has_sequence_parallel_params = True + break + if self.has_sequence_parallel_params: + assert self.mpu is not None, "sequence parallel allreduce only supported with tensor parallel enabled" + # Hook optimizer for snip_momentum pruning if hasattr(model, 'pruners'): from ..compression.helper import rewrite_optimizer_step @@ -362,6 +375,7 @@ def __init__(self, self.unflatten = _unflatten_dense_tensors self._is_compiled = False + self._is_optimizer_compiled = False def destroy(self): if self.optimizer is not None and hasattr(self.optimizer, 'destroy'): @@ -453,7 +467,10 @@ def get_global_grad_norm(self) -> float: Returns: float: norm """ - return self._global_grad_norm + grad_norm = self._global_grad_norm + if isinstance(grad_norm, torch.Tensor): + grad_norm = grad_norm.item() + return grad_norm def __getattr__(self, name): """ @@ -968,13 +985,13 @@ def _set_distributed_vars(self, args): device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank if device_rank >= 0: get_accelerator().set_device(device_rank) - self.device = torch.device(get_accelerator().device_name(), device_rank) + self.device = torch.device(get_accelerator().device_name(device_rank)) self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() else: self.world_size = 1 self.global_rank = 0 - self.device = torch.device(get_accelerator().device_name()) + self.device = get_accelerator().device() # Configure based on command line arguments def _configure_with_arguments(self, args, mpu): @@ -1247,6 +1264,8 @@ def _configure_optimizer(self, client_optimizer, model_parameters): if optimizer_wrapper == ZERO_OPTIMIZATION: self.optimizer = self._configure_zero_optimizer(basic_optimizer) + elif self.fp8_optimizer: + self.optimizer = self._configure_fp8_optimizer(basic_optimizer) elif optimizer_wrapper == AMP: amp_params = self.amp_params() log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0]) @@ -1476,6 +1495,28 @@ def _configure_bf16_optimizer(self, optimizer): return optimizer + def _configure_fp8_optimizer(self, optimizer): + clip_grad = self.gradient_clipping() + + if optimizer is None: + optimizer = DummyOptim(list(self.module.parameters())) + + log_dist('Creating FP8 optimizer', ranks=[0]) + + timers = self.timers if self.wall_clock_breakdown() else NoopTimer() + optimizer = FP8_Optimizer(optimizer, + self.param_names, + mpu=self.mpu, + clip_grad=clip_grad, + allgather_bucket_size=self.zero_allgather_bucket_size(), + dp_process_group=self.seq_data_parallel_group, + timers=timers, + grad_acc_dtype=self.get_data_types()[1], + immediate_grad_update=self._config.bfloat16_immediate_grad_update, + master_weights_dtype=self._config.fp8_optimizer_master_weights_dtype) + + return optimizer + def _configure_zero_optimizer(self, optimizer): zero_stage = self.zero_optimization_stage() @@ -2462,6 +2503,17 @@ def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000) if self.has_moe_layers: self._reduce_expert_gradients(expert_grads, elements_per_buffer) + if 
self.has_sequence_parallel_params: + for i, group in enumerate(self.optimizer.bf16_groups): + if group is None: + dtypes = self.optimizer.optimizer.param_groups[i]['group_dtypes'] + group = self.optimizer.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if getattr(lp, 'sequence_parallel', False): + dist.all_reduce(self.optimizer.fp32_groups_gradients[i][j], + op=dist.ReduceOp.SUM, + group=self.mpu.get_slice_parallel_group()) + def sparse_allreduce_no_retain(self, bucket, dp_group, dp_world_size=None): allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group, dp_world_size) # Densify sparse tensor and copy back to original location @@ -2493,9 +2545,10 @@ def sparse_allreduce(self, sparse, dp_group, dp_world_size=None): dp_world_size = dist.get_world_size(group=dp_group) if self.postscale_gradients(): if self.gradient_average: - values.mul_(self.gradient_predivide_factor() / (dp_world_size)) + + values.mul_(self.gradient_predivide_factor() / (dp_world_size / float(self.sequence_parallel_size))) else: - values.mul_(1. / (dp_world_size)) + values.mul_(1. / (dp_world_size / float(self.sequence_parallel_size))) indices_device_list = self.sparse_all_gather(indices, dp_group) values_device_list = self.sparse_all_gather(values, dp_group) @@ -3604,7 +3657,10 @@ def empty_partition_cache(self): gc.collect() get_accelerator().empty_cache() - def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwargs={}) -> None: + def compile(self, + backend=get_accelerator().get_compile_backend(), + compile_kwargs={}, + compile_optimizer_step=False) -> None: """Compile the module using the specified backend and kwargs. If a compiler_fn is set, it will be used instead of torch.compile(). """ @@ -3616,7 +3672,92 @@ def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwarg self.module.compile(backend=backend, **compile_kwargs) self._is_compiled = True + if compile_optimizer_step: + if not self._is_optimizer_compiled: + self.optimizer.step = torch.compile(self.optimizer.step, backend=backend, **compile_kwargs) + self._is_optimizer_compiled = True @property def is_compiled(self) -> bool: return self._is_compiled + + def optimize_moe(self, step, max_grouped_experts=1): + """ Optimize MoE gate capacity bins + + If MoE is using capacity bins, optimize the bins based on running stats. + In order to reduce the number of compilation recipes, we optimize a set + of grouped gates together. + The grouped gates must have same number of experts. 
+ """ + if not self.has_moe_layers: + return + + # find all gates with capacity factor + gate_with_capacity_bins_idx = [i for i, gate in enumerate(self.gate_modules) if gate.has_capacity_bins()] + if len(gate_with_capacity_bins_idx) == 0: + return + + # handle only gates have capacity bins usage statistics + gate_capacity_bin_stats = OrderedDict() + for i in gate_with_capacity_bins_idx: + gate = self.gate_modules[i] + if hasattr(gate, 'get_stats'): + stats = gate.get_stats(incremental=False) + if stats is not None and 'capacity_bins' in stats: + gate_capacity_bin_stats[i] = stats['capacity_bins'] + if len(gate_capacity_bin_stats) == 0: + return + + del gate_with_capacity_bins_idx # removing the list because it is out of date + + # divide gates into groups up to max_grouped_experts or until different num_experts encountered + gate_groups = [] + first_gate_idx = list(gate_capacity_bin_stats.keys())[0] + current_group = [first_gate_idx] + current_group_n_experts = self.num_experts[first_gate_idx] + for i in list(gate_capacity_bin_stats.keys())[1:]: + if self.num_experts[i] == current_group_n_experts and len(current_group) < max_grouped_experts: + current_group.append(i) + else: + gate_groups.append(current_group) + current_group = [i] + current_group_n_experts = self.num_experts[i] + gate_groups.append(current_group) + + # print new optimized groups for each pipeline stage (no sharing across pp stages) + dp_rank = dist.get_rank(group=self.mpu.get_data_parallel_group()) + tp_rank = bwc_tensor_model_parallel_rank(self.mpu) + log_ranks = [self.global_rank] if dp_rank == 0 and tp_rank == 0 else [] + + # for each group, (1) accumulate stats (2) calculate optimized capacity and (3) reconfigure bins + for gate_group in gate_groups: + group_stats = [] + for i in gate_group: + group_stats.append(gate_capacity_bin_stats[i]) + + # sanity - verify all gates in groups have same bins edges + bins_edges = [stats['edges'] for stats in group_stats] + same_edges = all(torch.equal(bins_edges[0], tensor) for tensor in bins_edges[1:]) + assert same_edges, f'Got different capacity bin edges for group={gate_group} edges={bins_edges}' + + # accumulate usage + stacked_usage = torch.stack([stats['usage'] for stats in group_stats], dim=0) + total_group_usage = torch.sum(stacked_usage, dim=0) + + # find optimized bins for this group + min_range = group_stats[0]['min_range'] + current_bins = group_stats[0]['edges'] + alignment = group_stats[0]['alignment'] + min_bin_size = group_stats[0]['min_bin_size'] + new_bins = optimize_bins(min_range=min_range, + bins=current_bins, + bins_usage=total_group_usage, + alignment=alignment, + min_bin_size=min_bin_size) + + # configure gates in group with new bins + for i in gate_group: + gate = self.gate_modules[i] + capacity_bins = gate.get_capacity_bins() + capacity_bins.set_bins(new_bins) + log_dist(f'step={step}, optimize capacity bins for group={gate_group} bins={new_bins}', ranks=log_ranks) diff --git a/deepspeed/runtime/fp8_optimizer.py b/deepspeed/runtime/fp8_optimizer.py new file mode 100644 index 000000000000..5e2ca0a27d62 --- /dev/null +++ b/deepspeed/runtime/fp8_optimizer.py @@ -0,0 +1,804 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from collections import OrderedDict +import torch +import sys +import os +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed import comm as dist +from deepspeed.runtime.constants import PIPE_REPLICATED +from deepspeed.runtime.base_optimizer import ZeROOptimizer +from packaging import version as pkg_version +from deepspeed.git_version_info import version +from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim, + align_dense_tensors, all_gather_dp_groups, is_model_parallel_parameter, + see_memory_usage, graph_process, get_norm_with_moe_layers) +from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address, groups, map_to_flat_opt_states +from deepspeed.moe.utils import is_moe_param, is_moe_param_group +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank +from deepspeed.checkpoint import enable_universal_checkpoint +from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE, + SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS, + PARAM_SLICE_MAPPINGS) +from deepspeed.accelerator import get_accelerator + +setattr(sys.modules[__name__], 'fragment_address', fragment_address) + + +class FP8_Optimizer(ZeROOptimizer): + + def __init__(self, + init_optimizer, + param_names, + mpu=None, + clip_grad=0.0, + norm_type=2, + allgather_bucket_size=5000000000, + dp_process_group=None, + timers=None, + grad_acc_dtype=None, + graph_harvesting=False, + immediate_grad_update=False, + has_moe_layers=False, + master_weights_dtype=None): + super().__init__() + see_memory_usage('begin fp8_optimizer', force=True) + self.timers = timers + self.optimizer = init_optimizer + self.param_names = param_names + self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim) + + assert grad_acc_dtype in [torch.float32, torch.bfloat16 + ], f"FP8_Optimizer: Unsupported gradient accumulation data type: {grad_acc_dtype}" + self.grad_acc_dtype = grad_acc_dtype + self.immediate_grad_update = immediate_grad_update + + self.clip_grad = clip_grad + self.norm_type = norm_type + self.mpu = mpu + self.allgather_bucket_size = int(allgather_bucket_size) + self.dp_process_group = dp_process_group + self.dp_rank = dist.get_rank(group=self.dp_process_group) + self.has_moe_layers = has_moe_layers + self.non_expert_gradients = [] + self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] + if self.has_moe_layers: + self._configure_moe_settings() + self.master_weights_dtype = master_weights_dtype + + # Use torch (un)flatten ops + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + + # align nccl all-gather send buffers to 4-byte boundary + self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2 + + # Build BF16/FP32 groups + self.bf16_groups = [] + self.bf16_groups_flat = [] + self.bf16_partitioned_groups = [] + + self.fp8_param_groups_dict = {torch.float8_e5m2: {"param_groups": [], "param_groups_flat": [], "param_partitioned_groups": []},\ + torch.float8_e4m3fn: {"param_groups": [], "param_groups_flat": [], "param_partitioned_groups": []}} + + self.fp32_groups_flat_partition = [] + + # Maintain different fp32 gradients views for convenience + self.fp32_groups_gradients = [] + self.fp32_groups_gradient_dict = {} + self.fp32_groups_gradients_flat = [] + self.fp32_groups_actual_gradients_flat = [] + 
self.fp32_groups_gradient_flat_partition = [] + self.fp32_groups_has_gradients = [] + + self.group_paddings = [] + self.graph_harvesting = graph_harvesting + if self.using_real_optimizer: + self._setup_for_real_optimizer() + + see_memory_usage('end fp8_optimizer', force=True) + + def _configure_moe_settings(self): + assert any( + [is_moe_param_group(group) for group in self.optimizer.param_groups] + ), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" + + for i, group in enumerate(self.optimizer.param_groups): + if is_moe_param_group(group): + assert all([is_moe_param(param) + for param in group['params']]), "All params in MoE group must be MoE params" + self.real_dp_process_group[i] = groups._get_expert_data_parallel_group(group['name']) + self.expert_gradients = {} + if self.has_moe_layers: + for key in groups._get_expert_data_parallel_group_dict().keys(): + self.expert_gradients[key] = [] + + def _setup_for_real_optimizer(self): + self.partition_count = [dist.get_world_size(group=pg) for pg in self.real_dp_process_group] + + if self.master_weights_dtype == torch.float16: + self.calculate_fp16_scale = self.optimizer.param_groups[0]['calculate_fp16_scale'] + self.cast_to_fp32 = self.optimizer.param_groups[0]['cast_to_fp32'] + self.cast_to_fp16 = self.optimizer.param_groups[0]['cast_to_fp16'] + for i, param_group in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) + see_memory_usage(f'before initializing group {i}', force=True) + + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + + # grab the original list + trainable_parameters = [param for param in param_group['params'] if param.requires_grad] + if self.master_weights_dtype == torch.float16: + assert len(trainable_parameters) == 1, \ + f"should be one param in each group when we use fp16 master weights with per tensor scaling" + if param_group.get('group_dtypes'): + self.bf16_groups.append(None) + self.bf16_groups_flat.append(None) + self.bf16_partitioned_groups.append(None) + self._setup_for_fp8_params(real_dp_world_size, i, param_group, partition_id, trainable_parameters) + else: + for dtype in self.fp8_param_groups_dict: + self.fp8_param_groups_dict[dtype]["param_groups"].append(None) + self.fp8_param_groups_dict[dtype]["param_groups_flat"].append(None) + self.fp8_param_groups_dict[dtype]["param_partitioned_groups"].append(None) + self._setup_for_bf16_params(real_dp_world_size, i, param_group, partition_id, trainable_parameters) + + # update optimizer param groups to reference fp32 params partition + param_group['params'] = [self.fp32_groups_flat_partition[i]] + + see_memory_usage(f'after initializing group {i}', force=True) + + see_memory_usage('before initialize_optimizer', force=True) + self.initialize_optimizer_states() + see_memory_usage('end initialize_optimizer', force=True) + + if self.immediate_grad_update: + self.create_grad_acc_hooks() + + # Need optimizer states initialized before linking lp to optimizer state + self._link_all_hp_params() + self._hp_optimizer_states_linked = False + self._enable_universal_checkpoint() + self._param_slice_mappings = self._create_param_mapping() + + def _setup_for_bf16_params(self, real_dp_world_size, i, param_group, partition_id, trainable_parameters): + self.bf16_groups.append(trainable_parameters) + + # create flat bf16 params + self.bf16_groups_flat.append( + 
self._flatten_dense_tensors_aligned(self.bf16_groups[i], + self.nccl_start_alignment_factor * real_dp_world_size)) + # Make bf16 params point to flat tensor storage + self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i], flat_tensor=self.bf16_groups_flat[i]) + + # divide flat weights into equal sized partitions + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size + bf16_dp_partitions = [ + self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size) + for dp_index in range(real_dp_world_size) + ] + self.bf16_partitioned_groups.append(bf16_dp_partitions) + + # create fp32 params partition + if self.master_weights_dtype == torch.float16: + partition = bf16_dp_partitions[partition_id].clone().detach() + self.calculate_fp16_scale(partition, group=self.real_dp_process_group[i]) + partition.data = self.cast_to_fp16(partition).data + self.fp32_groups_flat_partition.append(partition) + else: + self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().to( + self.master_weights_dtype).detach()) + self.fp32_groups_flat_partition[i].requires_grad = True + + num_elem_list = [t.numel() for t in self.bf16_groups[i]] + + # create fp32 gradients + fp32_flat_buffer = torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype) + self.fp32_groups_gradients_flat.append(fp32_flat_buffer) + if self.has_moe_layers and is_moe_param_group(param_group): + self.expert_gradients[param_group['name']].append(fp32_flat_buffer) + else: + self.non_expert_gradients.append(fp32_flat_buffer) + + # track individual fp32 gradients for entire model + fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], + num_elem_list=num_elem_list) + self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients + + # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) + length_without_padding = sum(num_elem_list) + self.fp32_groups_actual_gradients_flat.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding)) + + # flat tensor corresponding to gradient partition + self.fp32_groups_gradient_flat_partition.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size)) + + # track fp32 gradient updates + self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i])) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.bf16_groups_flat[i].numel() - length_without_padding + else: + padding = 0 + + self.group_paddings.append(padding) + + def _setup_for_fp8_params(self, real_dp_world_size, i, param_group, partition_id, trainable_parameters): + assert param_group.get('calculate_statistics_fn') is not None, \ + "calculate_statistics_fn wasn't provided" + dtypes = param_group['group_dtypes'] + for dtype in dtypes: + assert dtype in [torch.float8_e5m2, torch.float8_e4m3fn], \ + f'{dtype} is not supported' + calculate_statistics_fn = param_group['calculate_statistics_fn'] + calculate_statistics_fn(trainable_parameters) + + for p in trainable_parameters: + p.all_data = {} + for j in range(1, len(dtypes)): + self.fp8_param_groups_dict[dtypes[j]]["param_groups"].append([]) + for p in trainable_parameters: + p.all_data[dtypes[j]] = p.cast_to(dtypes[j], p.data) + self.fp8_param_groups_dict[dtypes[j]]["param_groups"][i].append(p.all_data[dtypes[j]]) + 
self.fp8_param_groups_dict[dtypes[0]]["param_groups"].append(trainable_parameters) + + first_dtype = True + for dtype in dtypes: + # create flat params + self.fp8_param_groups_dict[dtype]["param_groups_flat"].append( + self._flatten_dense_tensors_aligned(self.fp8_param_groups_dict[dtype]["param_groups"][i], + self.nccl_start_alignment_factor * real_dp_world_size)) + # Make params point to flat tensor storage + self._update_storage_to_flattened_tensor( + tensor_list=self.fp8_param_groups_dict[dtype]["param_groups"][i], + flat_tensor=self.fp8_param_groups_dict[dtype]["param_groups_flat"][i]) + # divide flat weights into equal sized partitions + partition_size = self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].numel() // real_dp_world_size + dp_partitions = [ + self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].narrow(0, dp_index * partition_size, + partition_size) + for dp_index in range(real_dp_world_size) + ] + self.fp8_param_groups_dict[dtype]["param_partitioned_groups"].append(dp_partitions) + if first_dtype: + # create fp32 params partition + if self.master_weights_dtype == torch.float16: + partition = dp_partitions[partition_id].clone().detach() + self.calculate_fp16_scale(partition, group=self.real_dp_process_group[i]) + partition.data = self.cast_to_fp16(partition).data + self.fp32_groups_flat_partition.append(partition) + else: + self.fp32_groups_flat_partition.append(dp_partitions[partition_id].clone().to( + self.master_weights_dtype).detach()) + self.fp32_groups_flat_partition[i].requires_grad = True + + num_elem_list = [t.numel() for t in self.fp8_param_groups_dict[dtype]["param_groups"][i]] + + # create fp32 gradients + fp32_flat_buffer = torch.zeros_like(self.fp8_param_groups_dict[dtype]["param_groups_flat"][i], + dtype=self.grad_acc_dtype) + self.fp32_groups_gradients_flat.append(fp32_flat_buffer) + if self.has_moe_layers and is_moe_param_group(param_group): + self.expert_gradients[param_group['name']].append(fp32_flat_buffer) + else: + self.non_expert_gradients.append(fp32_flat_buffer) + + # track individual fp32 gradients for entire model + fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], + num_elem_list=num_elem_list) + self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients + + # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) + length_without_padding = sum(num_elem_list) + self.fp32_groups_actual_gradients_flat.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding)) + + # flat tensor corresponding to gradient partition + self.fp32_groups_gradient_flat_partition.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size)) + + # track fp32 gradient updates + self.fp32_groups_has_gradients.append([False] * + len(self.fp8_param_groups_dict[dtype]["param_groups"][i])) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].numel( + ) - length_without_padding + else: + padding = 0 + + self.group_paddings.append(padding) + + # convert bf16 params to fp8 params + for p in trainable_parameters: + p.all_data[dtype] = p.cast_to(dtype, p.data) + p.data = p.all_data[dtype] + # update flat tensor to flat fp8 params + self.fp8_param_groups_dict[dtype]["param_groups_flat"][i] = self._flatten_dense_tensors_aligned( + 
self.fp8_param_groups_dict[dtype]["param_groups"][i], + self.nccl_start_alignment_factor * real_dp_world_size) + # update fp8 params point to flat tensor storage + self._update_storage_to_flattened_tensor( + tensor_list=self.fp8_param_groups_dict[dtype]["param_groups"][i], + flat_tensor=self.fp8_param_groups_dict[dtype]["param_groups_flat"][i]) + # divide flat weights into equal sized partitions + partition_size = self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].numel( + ) // real_dp_world_size + dp_partitions = [ + self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].narrow( + 0, dp_index * partition_size, partition_size) for dp_index in range(real_dp_world_size) + ] + self.fp8_param_groups_dict[dtype]["param_partitioned_groups"][i] = dp_partitions + first_dtype = False + + def _enable_universal_checkpoint(self): + for i, lp_param_group in enumerate(self.bf16_groups): + if lp_param_group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + lp_param_group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + enable_universal_checkpoint(param_list=lp_param_group) + + def _create_param_mapping(self): + param_mapping = [] + for i, param_group in enumerate(self.optimizer.param_groups): + param_mapping_per_group = OrderedDict() + if param_group.get('group_dtypes'): + dtypes = param_group['group_dtypes'] + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + params = self.bf16_groups[i] + for lp in params: + if lp._hp_mapping is not None: + lp_name = self.param_names[lp] + param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address() + param_mapping.append(param_mapping_per_group) + + return param_mapping + + def _link_all_hp_params(self): + for i, param_group in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) + + # Link bf16 and fp32 params in partition + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if param_group.get('group_dtypes'): + dtypes = param_group.get('group_dtypes') + partition_size = self.fp8_param_groups_dict[ + dtypes[0]]["param_groups_flat"][i].numel() // real_dp_world_size + lp_param_list = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size + lp_param_list = self.bf16_groups[i] + flat_hp_partition = self.fp32_groups_flat_partition[i] + link_hp_params(lp_param_list=lp_param_list, + flat_hp_partition=flat_hp_partition, + gradient_dict=self.fp32_groups_gradient_dict, + offload_gradient_dict=None, + use_offload=False, + param_group_index=i, + partition_start=partition_id * partition_size, + partition_size=partition_size, + dp_group=self.real_dp_process_group[i]) + + def _lazy_init_hp_params_optimizer_state(self): + if not self._hp_optimizer_states_linked: + for i, param_group in enumerate(self.optimizer.param_groups): + if param_group.get('group_dtypes'): + dtypes = param_group.get('group_dtypes') + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + params = self.bf16_groups[i] + lazy_init_hp_params_optimizer_state(params, self.fp32_groups_flat_partition[i], self.optimizer.state) + self._hp_optimizer_states_linked = True + + def initialize_optimizer_states(self): + """Take an optimizer step with zero-valued gradients to allocate internal + optimizer state. 
+ + This helps prevent memory fragmentation by allocating optimizer state at the + beginning of training instead of after activations have been allocated. + """ + self.clear_hp_grads() + + def _split_flat_tensor(self, flat_tensor, num_elem_list): + assert sum(num_elem_list) <= flat_tensor.numel() + tensor_list = [] + offset = 0 + for num_elem in num_elem_list: + dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem) + tensor_list.append(dense_tensor) + offset += num_elem + + return tensor_list + + def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor): + updated_params = self.unflatten(flat_tensor, tensor_list) + # TODO: SW-179781 need to remove the below WA once SW-179780 is resolved + get_accelerator().synchronize() + for p, q in zip(tensor_list, updated_params): + p.data = q.data + if hasattr(p, 'all_data') and p.dtype in p.all_data: + p.all_data[p.dtype] = q.data + + def _flatten_dense_tensors_aligned(self, tensor_list, alignment): + return self.flatten(align_dense_tensors(tensor_list, alignment)) + + @torch.no_grad() + def step(self, closure=None): + if closure is not None: + raise NotImplementedError(f'{self.__class__} does not support closure.') + + non_expert_grads_for_norm, expert_grads_for_norm = self.get_grads_for_norm() + non_expert_groups_norm = get_global_norm_of_tensors(input_tensors=non_expert_grads_for_norm, + mpu=self.mpu, + norm_type=self.norm_type, + use_graph=self.graph_harvesting) + all_groups_norm = non_expert_groups_norm + if self.has_moe_layers: + all_groups_norm = get_norm_with_moe_layers(non_expert_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) + + self._global_grad_norm = all_groups_norm + + assert all_groups_norm > 0. + if self.clip_grad > 0.: + clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True), + max_norm=self.clip_grad, + global_norm=all_groups_norm, + mpu=self.mpu, + use_graph=self.graph_harvesting) + + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + if self.master_weights_dtype is torch.float16: + param_partition.prev = param_partition.data + param_partition.data = self.cast_to_fp32(param_partition) + # In case of grad acc dtype different than FP32, need to cast to high precision. + param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + + self.optimizer.step() + + # We need to link optimizer state after the first step() call + self._lazy_init_hp_params_optimizer_state() + + self.update_lp_params() + + if self.master_weights_dtype is torch.float16: + for i, param_partition in enumerate(self.fp32_groups_flat_partition): + self.calculate_fp16_scale(param_partition, group=self.real_dp_process_group[i]) + param_partition.prev.copy_(self.cast_to_fp16(param_partition)) + param_partition.data = param_partition.prev + + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + + self.clear_hp_grads() + + def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs): + """Perform a backward pass and copy the low-precision gradients to the + high-precision copy. 
+ + If self.immediate_grad_update is false and update_hp_grads is true we copy/accumulate to the high-precision grads now + to prevent accumulating in the bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1) + + The low-precision grads are deallocated during this procedure. + """ + self.clear_lp_grads() + loss.backward(**bwd_kwargs) + + if not self.immediate_grad_update and update_hp_grads: + self.update_hp_grads(clear_lp_grads=clear_lp_grads) + + @torch.no_grad() + def _update_hp_grad(self, lp, group_idx, param_idx, clear_lp_grads): + if lp.grad is None: + return + + hp_grad = self.fp32_groups_gradients[group_idx][param_idx] + assert hp_grad is not None, \ + f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{group_idx}][{param_idx}]' + + if hasattr(lp, 'hp_grad'): + grad = lp.hp_grad + else: + grad = lp.grad + hp_grad.data.add_(grad.data.to(hp_grad.dtype).view(hp_grad.shape)) + lp._hp_grad = hp_grad + self.fp32_groups_has_gradients[group_idx][param_idx] = True + + # clear gradients + if hasattr(lp, 'hp_grad'): + lp.hp_grad = None + lp.grad = None + elif clear_lp_grads: + lp.grad = None + + @torch.no_grad() + def _update_hp_grads_func(self, clear_lp_grads=False): + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + self._update_hp_grad(lp, i, j, clear_lp_grads) + + @torch.no_grad() + def update_hp_grads(self, clear_lp_grads=False): + if self.immediate_grad_update: + return + + if self.graph_harvesting: + graph_process(False, self._update_hp_grads_func, clear_lp_grads) + else: + self._update_hp_grads_func(clear_lp_grads) + #cpu op + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if lp.grad is None: + continue + self.fp32_groups_has_gradients[i][j] = True + + @torch.no_grad() + def get_grads_for_reduction(self): + if self.has_moe_layers: + return self.non_expert_gradients, self.expert_gradients + return self.non_expert_gradients, {} + + @torch.no_grad() + def get_grads_for_norm(self, for_clipping=False): + """ + Returns: + tuple[list[Tensor], dict[ep_name, List[Tensor]] | list: + If for_clipping, return all gradients. + Otherwise, separate and return dict of expert_grad and list of non_expert_grad + """ + # (grads, expert_group_name) + expert_grads_for_norm = {} + + # grads + non_expert_grads_for_norm = [] + all_grads_for_clip = [] + + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + assert len(self.bf16_groups) == len(self.optimizer.param_groups) + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if not for_clipping: + if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated: + continue + + # skip duplicated parameters. perform norm only on cards with tp_rank=0. + # non-duplicated parameters include: + # - Parameters with tp: Use allreducesum of mp_group. + # - Moe Parameters with ep: Use allreducesum of ep_group. 
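+                    # In other words, count a gradient on this rank only when the rank owns a
+                    # non-duplicated copy: tp_rank 0 for replicated parameters, any rank for
+                    # TP-sharded parameters, and any rank for MoE expert parameters.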
+ if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp) or is_moe_param(lp)): + continue + + if not self.fp32_groups_has_gradients[i][j]: + continue + if not for_clipping: + param_group = self.optimizer.param_groups[i] + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_gradients[i][j]) + else: + non_expert_grads_for_norm.append(self.fp32_groups_gradients[i][j]) + else: + all_grads_for_clip.append(self.fp32_groups_gradients[i][j]) + if not for_clipping: + return non_expert_grads_for_norm, expert_grads_for_norm + return all_grads_for_clip + + @torch.no_grad() + def update_lp_params(self): + dtypes = [] + for i, (partition_group, + fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if partition_group is None: + calculate_statistics_fn = self.optimizer.param_groups[i]['calculate_statistics_fn'] + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + calculate_statistics_fn(params) + for p in params: + if p._hp_mapping is not None: + for dtype in dtypes: + fp8_partition_data_temp = p.cast_to(dtype, p._hp_mapping.hp_fragment.data) + p.all_data[dtype].flatten().narrow( + 0, p._hp_mapping.lp_fragment_address.start, + p._hp_mapping.lp_fragment_address.numel).copy_(fp8_partition_data_temp) + else: + partition_group[partition_id].data.copy_(fp32_partition.data) + # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) + # if i == 0: + # print_rank_0(f'{fp32_partition[:10]=}', force=True) + + #TODO: SW-90304 call all_gather_dp_groups with async_op=true if zero optimizer hpu_use_async_collectives is enabled + if any(g is not None for g in self.bf16_groups_flat): + all_gather_dp_groups(groups_flat=self.bf16_groups_flat, + partitioned_param_groups=self.bf16_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + for dtype in dtypes: + param_groups_flat = self.fp8_param_groups_dict[dtype]["param_groups_flat"] + param_partitioned_groups = self.fp8_param_groups_dict[dtype]["param_partitioned_groups"] + if any(g is not None for g in param_groups_flat): + all_gather_dp_groups(groups_flat=param_groups_flat, + partitioned_param_groups=param_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + + def clear_hp_grads(self): + for flat_gradients in self.fp32_groups_gradients_flat: + flat_gradients.zero_() + + for i, group in enumerate(self.fp32_groups_gradients): + self.fp32_groups_has_gradients[i] = [False] * len(group) + + def clear_lp_grads(self): + + # using zero_() fixed memory address for graph replay + set_to_none = False if self.graph_harvesting else True + zero_grads_list = [] + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for param in group: + if set_to_none: + param.grad = None + if hasattr(param, 'hp_grad'): + param.hp_grad = None + elif param.grad is not None: + if param.grad.grad_fn is not None: + param.grad.detach_() + 
zero_grads_list.append(param.grad) + if not set_to_none and len(zero_grads_list) > 0: + torch._foreach_zero_(zero_grads_list) + + def state_dict(self): + state_dict = {} + state_dict[CLIP_GRAD] = self.clip_grad + state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict() + state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition + state_dict[GROUP_PADDINGS] = self.group_paddings + state_dict[PARTITION_COUNT] = self.partition_count + state_dict[DS_VERSION] = version + state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings + + return state_dict + + # Restore base optimizer fp32 weights from bfloat16 weights + def _restore_from_bit16_weights(self): + for i, group in enumerate(self.bf16_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + for j, (partitioned_group, + fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): + if partitioned_group is None: + dtypes = self.optimizer.param_groups[j]['group_dtypes'] + partitioned_group = self.fp8_param_groups_dict[dtypes[0]]["param_partitioned_groups"][j] + fp32_partition.data.copy_(partitioned_group[partition_id].data) + + def refresh_fp32_params(self): + self._restore_from_bit16_weights() + + def load_state_dict(self, + state_dict_list, + checkpoint_folder, + load_optimizer_states=True, + load_from_fp32_weights=False, + load_serial=None): + if checkpoint_folder: + self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights) + else: + self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights) + + def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False): + + dp_rank = dist.get_rank(group=self.dp_process_group) + current_rank_sd = state_dict_list[dp_rank] + + ckpt_version = current_rank_sd.get(DS_VERSION, False) + assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed" + ckpt_version = pkg_version.parse(ckpt_version) + + self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad) + + if load_optimizer_states: + self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) + + if load_from_fp32_weights: + for current, saved in zip(self.fp32_groups_flat_partition, + current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]): + src_tensor = _get_padded_tensor(saved, current.numel()) + current.data.copy_(src_tensor.data) + + if load_optimizer_states: + self._link_all_hp_params() + + def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): + self._load_hp_checkpoint_state(checkpoint_folder) + + @property + def param_groups(self): + """Forward the wrapped optimizer's parameters.""" + return self.optimizer.param_groups + + def _load_hp_checkpoint_state(self, checkpoint_dir): + checkpoint_dir = os.path.join(checkpoint_dir, "zero") + tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + tp_world_size = self.mpu.get_slice_parallel_world_size() + + for i, param_group in enumerate(self.optimizer.param_groups): + # We have an assumption that all params in the same param_group have the same keys + opt_keys = set() + if param_group.get('group_dtypes'): + dtypes = param_group['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + group = self.bf16_groups[i] + + for lp in group: + if lp._hp_mapping is not None: + #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") + 
lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, + tp_world_size) + for key in lp._hp_mapping.get_optim_state_keys(): + opt_keys.add(key) + map_to_flat_opt_states(param_group['params'][0], group, self.optimizer.state, opt_keys) + + def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): + assert self.immediate_grad_update + self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=False) + + def create_grad_acc_hooks(self): + self.grad_accs = [] + for i, param_group in enumerate(self.bf16_groups): + if param_group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + param_group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, param in enumerate(param_group): + if param.requires_grad: + + def wrapper(param, i, j): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + def accumulate_hp_grads_and_remove_lp(*notneeded): + self.accumulate_hp_grads_and_remove_lp(param, i, j) + + grad_acc.register_hook(accumulate_hp_grads_and_remove_lp) + self.grad_accs.append(grad_acc) + + wrapper(param, i, j) + + +def _get_padded_tensor(src_tensor, size): + if src_tensor.numel() >= size: + return src_tensor + padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device) + slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel()) + slice_tensor.data.copy_(src_tensor.data) + return padded_tensor diff --git a/deepspeed/runtime/hpu_utils.py b/deepspeed/runtime/hpu_utils.py new file mode 100644 index 000000000000..5a7ca4d4b97f --- /dev/null +++ b/deepspeed/runtime/hpu_utils.py @@ -0,0 +1,11 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.accelerator import get_accelerator + + +def get_use_hpu(): + return get_accelerator().device_name() == "hpu" diff --git a/deepspeed/runtime/hybrid_engine.py b/deepspeed/runtime/hybrid_engine.py index a991c4304563..8a6311bb6e83 100644 --- a/deepspeed/runtime/hybrid_engine.py +++ b/deepspeed/runtime/hybrid_engine.py @@ -17,16 +17,14 @@ from deepspeed.accelerator import get_accelerator from torch import nn from deepspeed.utils import logger - -from deepspeed.ops.op_builder import InferenceBuilder - from deepspeed.module_inject.layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding +from ..ops.transformer.inference.op_binding.workspace import WorkspaceOp + try: import transformers OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding except: OPTLearnedPositionalEmbedding = None -inference_cuda_module = None class DeepSpeedHybridEngine(DeepSpeedEngine): @@ -61,12 +59,8 @@ def __init__(self, args, model, **kwargs): self._total_batch_size = None self._gather_latency = 0 - global inference_cuda_module - if inference_cuda_module is None: - builder = InferenceBuilder() - inference_cuda_module = builder.load() - self.is_lora_fused = False + self.workspace = WorkspaceOp() def convert_to_linear_transposed(self, model): @@ -160,13 +154,13 @@ def unfuse_lora_weight_non_pinned(self): def retake_inference_cache(self): if self._config.hybrid_engine.release_inference_cache: - retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: logger.warning("Unable to acquire workspace on first attempt, emptying cache and retrying.") gc.collect() get_accelerator().empty_cache() - 
retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: raise RuntimeError("Unable to retake inference workspace.") @@ -269,7 +263,7 @@ def generate(self, *inputs, **kwargs): self.is_lora_fused = False if self._config.hybrid_engine.release_inference_cache: - inference_cuda_module.release_workspace() + self.workspace.release_workspace() gc.collect() get_accelerator().empty_cache() diff --git a/deepspeed/runtime/lr_schedules.py b/deepspeed/runtime/lr_schedules.py index d7f7e15a4dbd..e0554b9709e0 100755 --- a/deepspeed/runtime/lr_schedules.py +++ b/deepspeed/runtime/lr_schedules.py @@ -13,6 +13,7 @@ from torch.optim import Optimizer import math from deepspeed.utils import logger +from torch import tensor, is_tensor LR_SCHEDULE = 'lr_schedule' LR_RANGE_TEST = 'LRRangeTest' @@ -694,6 +695,9 @@ def step(self, last_batch_iteration=None): last_batch_iteration = self.last_batch_iteration + 1 self.last_batch_iteration = last_batch_iteration for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + # new LR should match the type of current LR for scalar and Tensor LR support + if is_tensor(param_group['lr']): + lr = tensor(lr, device=param_group['lr'].device) param_group['lr'] = lr self._last_lr = [group['lr'] for group in self.optimizer.param_groups] diff --git a/deepspeed/runtime/pipe/engine.py b/deepspeed/runtime/pipe/engine.py index 9e84121d50fa..f2a67dd30991 100644 --- a/deepspeed/runtime/pipe/engine.py +++ b/deepspeed/runtime/pipe/engine.py @@ -13,6 +13,7 @@ from deepspeed.utils.timer import ThroughputTimer from deepspeed.accelerator import get_accelerator from deepspeed.runtime.bf16_optimizer import BF16_Optimizer +from deepspeed.runtime.fp8_optimizer import FP8_Optimizer from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE from deepspeed.utils.timer import FORWARD_MICRO_TIMER, FORWARD_GLOBAL_TIMER, BACKWARD_MICRO_TIMER, \ @@ -78,7 +79,7 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): self.eval_return_logits = False self.outputs = None # BF16 Optimizer is hardcoded for fp32 gradient accumulation - self.using_bf16_optimizer = type(self.optimizer) == BF16_Optimizer + self.using_bf16_optimizer = type(self.optimizer) in [BF16_Optimizer, FP8_Optimizer] # used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB self.pipeline_enable_backward_allreduce = True @@ -151,11 +152,33 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): if self.global_rank != min(d['ranks']): tied_params += sum(p.numel() for p in d['module'].parameters()) unique_params -= tied_params - params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device) + + # Use Int32 representation instead of Int64 for calclations. + # num_param division & modulo after all reduce should be lower than MAX Int32. + # Using this value will be safe if used with less than ~2000 devices. 
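The comment block here (continued just below) motivates reducing parameter counts with int32 tensors; a hedged sketch of the quotient/remainder trick as a hypothetical standalone helper, not a DeepSpeed API:

import torch
import torch.distributed as dist

def all_reduce_large_count(count, group=None, chunk_size=10**6):
    # split into (quotient, remainder) so the reduced values stay below int32 max
    parts = torch.tensor([count // chunk_size, count % chunk_size], dtype=torch.int32)
    dist.all_reduce(parts, group=group)  # requires an initialized process group
    return int(parts[0]) * chunk_size + int(parts[1])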
+ # Int32Max > all_reduce_group*chunk_size + chunk_size = 10**6 + + num_params_quotient = num_params // chunk_size + num_params_remainder = num_params % chunk_size + + unique_params_quotient = unique_params // chunk_size + unique_params_remainder = unique_params % chunk_size + + assert (unique_params_quotient * chunk_size + + unique_params_remainder) == unique_params, "Value mismatch after Int64 splitting" + assert (num_params_quotient * chunk_size + + num_params_remainder) == num_params, "Value mismatch after Int64 splitting" + + params_tensor = torch.IntTensor( + data=[num_params_quotient, num_params_remainder, unique_params_quotient, unique_params_remainder]).to( + self.device) + dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group()) params_tensor = params_tensor.tolist() - total_params = params_tensor[0] - unique_params = params_tensor[1] + total_params = params_tensor[0] * chunk_size + params_tensor[1] + unique_params = params_tensor[2] * chunk_size + params_tensor[3] + if self.grid.data_parallel_id == 0: logger.info(f'RANK={self.global_rank} ' f'STAGE={self.stage_id} ' @@ -213,6 +236,8 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): self.module.activation_checkpoint_func = ds_checkpointing.non_reentrant_checkpoint if self.grid.get_global_rank() == 0: logger.info(f'CONFIG: activation_checkpoint_func=non_reentrant_checkpoint') + if self.module.activation_checkpoint_interval > 0: + self.module._precompute_checkpointable_values() self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline @@ -471,9 +496,7 @@ def eval_batch(self, micro_batches = self.micro_batches if num_micro_batches is None else num_micro_batches # Do the work - sched = schedule.InferenceSchedule(micro_batches=self.micro_batches, - stages=self.num_stages, - stage_id=self.stage_id) + sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=self.num_stages, stage_id=self.stage_id) # prevent dead-lock with multiple evals sequence dist.barrier() diff --git a/deepspeed/runtime/pipe/module.py b/deepspeed/runtime/pipe/module.py index 8036faef72ee..e64f801b1325 100644 --- a/deepspeed/runtime/pipe/module.py +++ b/deepspeed/runtime/pipe/module.py @@ -196,6 +196,16 @@ def __init__(self, #newseed = get_accelerator().initial_seed() + self._grid.get_stage_id() #ds_utils.set_random_seed(newseed) + self.activation_checkpoint_interval = activation_checkpoint_interval + + self.activation_checkpoint_func = activation_checkpoint_func + + #storage for precomputed checkpointeble results + self.is_checkpointable_results = [] + self.is_checkpointable_results_interval = None + + # if configuration use_reentrant = False, self.activation_checkpoint_func will be set to ``checkpointing.non_reentrant_checkpoint`` + #with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]): self._build() self.to(get_accelerator().device_name(self.local_rank)) @@ -203,10 +213,15 @@ def __init__(self, self.tied_comms = self._index_tied_modules() self._synchronize_tied_weights() - self.activation_checkpoint_interval = activation_checkpoint_interval - - self.activation_checkpoint_func = activation_checkpoint_func - # if configuration use_reentrant = False, self.activation_checkpoint_func will be set to ``checkpointing.non_reentrant_checkpoint`` + def _precompute_checkpointable_values(self): + if self.activation_checkpoint_interval > 0 and self.is_checkpointable_results_interval != self.activation_checkpoint_interval: + num_layers = 
len(self.forward_funcs) + self.interval_was_zero = False + for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) + funcs = self.forward_funcs[start_idx:end_idx] + self.is_checkpointable_results.append(self._is_checkpointable(funcs)) + self.is_checkpointable_results_interval = self.activation_checkpoint_interval def _build(self): specs = self._layer_specs @@ -352,7 +367,9 @@ def exec_func(*inputs): else: num_layers = len(self.forward_funcs) x = forward_input - for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + for start_idx, is_checkpointable_result in \ + zip(range(0, num_layers, self.activation_checkpoint_interval), self.is_checkpointable_results): + end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) funcs = self.forward_funcs[start_idx:end_idx] @@ -361,7 +378,7 @@ def exec_func(*inputs): if not isinstance(x, tuple): x = (x, ) - if self._is_checkpointable(funcs): + if is_checkpointable_result: x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x) else: x = exec_range_func(start_idx, end_idx)(*x) @@ -641,3 +658,12 @@ def get_additional_losses(self): Return a dictionary of {"loss name": loss_value} or None if no additional losses. """ return None + + #TODO(SW-198696) remove workaround for micro offset torch compile issue + def compile(self, *args, **kwargs): + for idx, layer in enumerate(self.forward_funcs): + if isinstance(layer, nn.Module): + layer.compile(*args, **kwargs) + else: + new_layer = torch.compile(layer, *args, **kwargs) + self.forward_funcs[idx] = new_layer diff --git a/deepspeed/runtime/state_dict_factory.py b/deepspeed/runtime/state_dict_factory.py index c2db85d1ba58..469e2fe9f55f 100755 --- a/deepspeed/runtime/state_dict_factory.py +++ b/deepspeed/runtime/state_dict_factory.py @@ -335,10 +335,26 @@ def merge_state_dict(self, new_client_sd[key] = torch.cat(value_list, axis=0) else: new_client_sd[key] = self.merge_query_key_value(value_list, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key: - if quantize and "mlp.dense_h_to_4h.weight" in key: - value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "lm_head.weight" in key: new_client_sd[key] = torch.cat(value_list, axis=0) + elif "mlp.dense_h_to_4h.weight" in key: + if quantize: + value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + # HACK: + # Following code checks if h_to_4h is swiglu. This is required in order to merge correctly. + # The correct way is to add metadata to state_dict that provides info on how to merge/split each tensor. 
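A small worked example (hypothetical shard shapes) of the SwiGLU-aware merge performed just below: each tensor-parallel shard stores the gate and up projections stacked on dim 0, so every shard is chunked in two and the halves are concatenated separately:

import torch

shards = [torch.randn(8, 4), torch.randn(8, 4)]          # two TP shards in swiglu layout
halves = [torch.chunk(s, 2, dim=0) for s in shards]      # (gate_i, up_i) per shard
merged = torch.cat([torch.cat([h[0] for h in halves], dim=0),
                    torch.cat([h[1] for h in halves], dim=0)], dim=0)
assert merged.shape == (16, 4)                           # all gates first, then all ups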
+ size_h_to_4h = sd_list[0]["mlp.dense_h_to_4h.weight"].numel() + size_4h_to_h = sd_list[0]["mlp.dense_4h_to_h.weight"].numel() + if size_h_to_4h == size_4h_to_h: + new_client_sd[key] = torch.cat(value_list, axis=0) + elif size_h_to_4h == 2 * size_4h_to_h: + chunked_slices = [torch.chunk(v, 2, dim=0) for v in value_list] + merged_chunks_0 = torch.cat([s[0] for s in chunked_slices], dim=0) + merged_chunks_1 = torch.cat([s[1] for s in chunked_slices], dim=0) + new_client_sd[key] = torch.cat([merged_chunks_0, merged_chunks_1], dim=0) + else: + assert False, f"Unsupported slices size of mlp.dense_h_to_4h.weight={size_h_to_4h} " \ + f"mlp.dense_4h_to_h.weight={size_4h_to_h}" else: new_client_sd[key] = value_list[0] if quantize: @@ -383,12 +399,27 @@ def split_state_dict(self, q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] new_client_sd[key] = self.split_query_key_value(value, num_to_split, ckpt_offset, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key: + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key \ + or "lm_head.weight" in key: assert value.shape[0] % num_to_split == 0 split_size = value.shape[0] // num_to_split - if quantize and "mlp.dense_h_to_4h.weight" in key: + new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] + elif "mlp.dense_h_to_4h.weight" in key: + assert value.shape[0] % num_to_split == 0 + split_size = value.shape[0] // num_to_split + if quantize: q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] + # HACK: + # Following code checks if h_to_4h is swiglu. + # The correct way to check is to add metadata to state_dict that provides info on + # how to merge/split each tensor. + # Currently, swiglu split is NOT supported as it requires handling of all chunks. 
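A companion sketch for the split path that follows (hypothetical tensors): the layout check compares element counts of the two MLP weights, and only the non-SwiGLU case is split evenly along dim 0:

import torch

h_to_4h, four_h_to_h = torch.randn(16, 4), torch.randn(4, 16)
assert h_to_4h.numel() == four_h_to_h.numel(), "swiglu layout detected: split unsupported"
num_to_split, ckpt_offset = 2, 1
shard = torch.split(h_to_4h, h_to_4h.shape[0] // num_to_split, dim=0)[ckpt_offset]
assert shard.shape == (8, 4)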
+ size_h_to_4h = value.numel() + size_4h_to_h = client_sd["mlp.dense_4h_to_h.weight"].numel() + assert size_h_to_4h == size_4h_to_h, \ + f"Split not supported dense_h_to_4h.weight size={size_h_to_4h} " \ + f"and dense_4h_to_h.weight size={size_4h_to_h}" new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] else: new_client_sd[key] = value diff --git a/deepspeed/runtime/utils.py b/deepspeed/runtime/utils.py index 2c01c3475a70..441be5c9afcb 100755 --- a/deepspeed/runtime/utils.py +++ b/deepspeed/runtime/utils.py @@ -953,6 +953,8 @@ def align_dense_tensors(tensor_list, alignment): def all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group): for group_id, (group_flat, partitioned_params) in enumerate(zip(groups_flat, partitioned_param_groups)): + if group_flat is None: + continue partition_id = dist.get_rank(group=dp_process_group[group_id]) dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) if dp_world_size == 1: @@ -968,6 +970,8 @@ def all_gather_dp_groups(groups_flat, partitioned_param_groups, dp_process_group return all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group) for group_id, partitioned_params in enumerate(partitioned_param_groups): + if partitioned_params is None: + continue # Sequential AllGather Best of both worlds partition_id = dist.get_rank(group=dp_process_group[group_id]) dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) diff --git a/deepspeed/runtime/zero/config.py b/deepspeed/runtime/zero/config.py index 2089d59dbce4..410bfc8c82a4 100644 --- a/deepspeed/runtime/zero/config.py +++ b/deepspeed/runtime/zero/config.py @@ -246,6 +246,12 @@ class DeepSpeedZeroConfig(DeepSpeedConfigModel): new_param="gather_16bit_weights_on_model_save") """ Deprecated, please use ``gather_16bit_weights_on_model_save`` """ + use_all_reduce_for_fetch_params: bool = Field(False, alias="stage3_use_all_reduce_for_fetch_params") + """ + Use all_reduce op when fetching module parameters at stage3. This allows to significantly improve + performance by reducing the overhead of concatenation and slicing on the host. 
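For context, a hedged example of how this new flag might be enabled from a user config dictionary (placement under zero_optimization is an assumption here; the key name follows the alias declared above):

ds_config = {
    "zero_optimization": {
        "stage": 3,
        "stage3_use_all_reduce_for_fetch_params": True,
    },
}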
+ """ + ignore_unused_parameters: bool = True """ Unused parameters in modules may be unexpected in static networks, but diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py index 3f43e865fa72..aae6c84ec2e3 100644 --- a/deepspeed/runtime/zero/stage3.py +++ b/deepspeed/runtime/zero/stage3.py @@ -15,7 +15,7 @@ from deepspeed.utils import logger from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce -from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter, get_only_unique_item +from deepspeed.runtime.utils import inf, is_model_parallel_parameter, get_only_unique_item from deepspeed.runtime.zero.partition_parameters import * from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum @@ -215,14 +215,12 @@ def __init__( self.module = module self.elastic_checkpoint = elastic_checkpoint - self.inf_or_nan_tracker: Tensor = torch.zeros(1, - dtype=torch.bool, - device=get_accelerator().current_device_name(), - requires_grad=False) + self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu + + self.inf_or_nan_tracker: Tensor = torch.zeros(1, dtype=torch.bool, device=self.device, requires_grad=False) self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam) - self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu ### streams used for overlapping computation with communication self.reduce_and_partition_stream = None if get_accelerator().is_synchronized_device() else get_accelerator( ).Stream() if overlap_comm else get_accelerator().default_stream() @@ -1412,7 +1410,7 @@ def complete_grad_norm_calculation_for_cpu_offload(self, params): err = torch.tensor(-1.0, device=inf_or_nan.device, dtype=torch.float) total_norm = inf_or_nan * err + inf_or_nan.logical_not() * total_norm - return total_norm + return total_norm.cpu() @instrument_w_nvtx def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: @@ -2027,7 +2025,7 @@ def step(self, closure=None): return norm_groups = self._get_norm_groups() - scaled_global_grad_norm = get_global_norm(norm_list=norm_groups) + scaled_global_grad_norm = torch.norm(torch.stack(norm_groups)) # Stash unscaled gradient norm self._global_grad_norm = scaled_global_grad_norm / self.loss_scale @@ -2111,8 +2109,8 @@ def unscale_and_clip_grads(self, sub_group_id, total_norm): if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad - if clip > 1: - combined_scale = clip * self.loss_scale + clip = torch.clamp(clip, min=1.0) + combined_scale = clip * self.loss_scale self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. 
/ combined_scale) @@ -2147,7 +2145,8 @@ def has_overflow(self, partition_gradients=True): self.inf_or_nan_tracker += torch.isnan(self.grad_partitions_flat_buffer).any() self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0 - overflow_gpu = self.inf_or_nan_tracker.clone().to(torch.uint8) + overflow_gpu = self.inf_or_nan_tracker.clone().to(get_accelerator().current_device_name()).to( + torch.uint8) self.inf_or_nan_tracker.zero_() if not get_accelerator().resolves_data_dependency(): @@ -2381,9 +2380,10 @@ def _set_param_groups(self, value): # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: - return self.external_loss_scale + # TODO: SW-187114 Remove WA: cast self.loss_scale to float + return float(self.external_loss_scale) else: - return self.loss_scaler.cur_scale + return float(self.loss_scaler.cur_scale) def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value diff --git a/deepspeed/runtime/zero/stage_1_and_2.py b/deepspeed/runtime/zero/stage_1_and_2.py index 7c8ae8e67b78..709317255777 100755 --- a/deepspeed/runtime/zero/stage_1_and_2.py +++ b/deepspeed/runtime/zero/stage_1_and_2.py @@ -2087,10 +2087,11 @@ def _set_param_groups(self, value): # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): + # TODO: SW-187114 Remove WA: cast self.loss_scale to float if self.custom_loss_scaler: - return self.external_loss_scale + return float(self.external_loss_scale) else: - return self.loss_scaler.cur_scale + return float(self.loss_scaler.cur_scale) def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value diff --git a/deepspeed/tools/__init__.py b/deepspeed/tools/__init__.py new file mode 100644 index 000000000000..36d8fbe03558 --- /dev/null +++ b/deepspeed/tools/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/deepspeed/tools/pg_sim/__init__.py b/deepspeed/tools/pg_sim/__init__.py new file mode 100644 index 000000000000..917c5a255fa8 --- /dev/null +++ b/deepspeed/tools/pg_sim/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .pg import install_sim_dist_backend diff --git a/deepspeed/tools/pg_sim/pg.py b/deepspeed/tools/pg_sim/pg.py new file mode 100644 index 000000000000..b7e2483f86d0 --- /dev/null +++ b/deepspeed/tools/pg_sim/pg.py @@ -0,0 +1,306 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import warnings +import torch +import torch.distributed as dist + +from datetime import timedelta +from functools import partial +from torch.distributed.constants import default_pg_timeout +from torch.distributed.distributed_c10d import (GroupMember, BroadcastOptions, AllreduceOptions, ReduceOp) +from torch.distributed.distributed_c10d import STORE_BASED_BARRIER_PREFIX # noqa +from deepspeed.accelerator import get_accelerator + + +class SimProcessGroup: + BACKEND = "sim" + DEFAULT_PG = None + WORLD_SIZE = -1 + STORE = None + + def __init__(self, rank, world_size, timeout, backend): + self.sim_rank = rank + self.pg_world_size = world_size + self.timeout = timeout + self.backend = backend + self.pg = None + self.torch_ver_major = int(torch.__version__.split('.')[0]) + self.torch_ver_minor = int(torch.__version__.split('.')[1]) + + assert self.torch_ver_major == 1, \ + f"Torch version major != 1 is not supported (version={torch.__version__})" + assert self.torch_ver_minor >= 10, \ + f"Torch version < 1.10 is not supported (version={torch.__version__})" + + if self.torch_ver_minor < 13: + warnings.warn(f"Torch version < 1.13 is not tested (version={torch.__version__})") + + # default is the first process group created + if SimProcessGroup.DEFAULT_PG is None: + SimProcessGroup.DEFAULT_PG = self + + @staticmethod + def get_dist_group_count(): + return torch.distributed.distributed_c10d._group_count + + @classmethod + def store_add_rest_of_world(cls, next_group): + group = cls.get_dist_group_count() + (1 if next_group else 0) + store_key = f"{STORE_BASED_BARRIER_PREFIX}:{group}" + cls.STORE.add(store_key, cls.WORLD_SIZE - 1) + + def _create_pg(self): + self.store_add_rest_of_world(next_group=False) + pg = dist.new_group(ranks=[0], timeout=self.timeout, backend=self.backend, pg_options=None) + return pg + + def post_create_sim_group(self): + self.pg = self._create_pg() + + @classmethod + def default_pg(cls): + assert cls.DEFAULT_PG is not None + return cls.DEFAULT_PG + + def size(self): + return self.pg_world_size + + def rank(self): + return self.sim_rank + + # ---------------------------------------------------- + # P2P + # + # P2P operations are simulated as all_reduce + # ---------------------------------------------------- + class P2PRequestObject: + """ Dummy p2p request object that is returned for p2p ops""" + + def __init__(self, src): + self.src = src + + def wait(self): + return + + def is_completed(self): + return True + + def _source_rank(self): + return self.src + + def _p2p_op(self, tensor_list, src=None): + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + self.pg.allreduce(tensor_list, opts).wait() + src = src if src is not None else self.sim_rank + return SimProcessGroup.P2PRequestObject(src=src) + + def send(self, tensor_list, _group_dst_rank, _tag): + return self._p2p_op(tensor_list) + + def recv_anysource(self, tensor_list, _tag): + return self._p2p_op(tensor_list) + + def recv(self, tensor_list, src, _tag): + return self._p2p_op(tensor_list, src=src) + + # ---------------------------------------------------- + # Collectives + # + # For some collectives, it is required to shrink the + # input/output tensors_list to 1-element (world_size=1). + # also, need to make all other members of tensors_list to depend + # on the first element - to prevent incorrect graph signaling. 
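A standalone illustration of the shrink-and-copy idea this comment block describes (hypothetical tensors, not the SimProcessGroup methods): only the simulated rank's slot takes part in the world-size-1 collective, and its result is then copied into every other slot locally:

import torch

sim_rank, world_size = 0, 4
tensors_list = [[torch.zeros(2) for _ in range(world_size)]]
world1 = [t[sim_rank:sim_rank + 1] for t in tensors_list]  # shrink each list to one element
world1[0][0].copy_(torch.ones(2))                          # stand-in for the real collective
for src, full in zip(world1, tensors_list):
    for t in full:
        t.data[:] = src[0].data[:]                         # fan the single result out locally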
+ # The logic of shrink and then copy is handled by: + # - _adjust_tensors_list_to_ws1 + # - _copy_data_from_tensor_to_tensor_list + # ---------------------------------------------------- + @staticmethod + def _to_device(tensors, device): + if isinstance(tensors, dict): + return {k: SimProcessGroup._to_device(v, device) for k, v in tensors.items()} + elif isinstance(tensors, list): + return [SimProcessGroup._to_device(v, device) for v in tensors] + elif isinstance(tensors, torch.Tensor): + return tensors.to(device) + else: + assert False, 'Unsupported tensors type' + + def broadcast(self, tensors, opts): + """ ignore opts.rootRank and override to be the source """ + opts.rootRank = self.sim_rank + tensors = self._to_device(tensors, get_accelerator().current_device_name()) + return self.pg.broadcast(tensors, opts) + + def allreduce(self, tensors, opts): + return self.pg.allreduce(tensors, opts) + + def allreduce_coalesced(self, tensors, opts): + return self.pg.allreduce_coalesced(tensors, opts) + + def reduce(self, tensors, opts): + if opts.rootRank == self.sim_rank: + return self.pg.reduce(tensors, opts) + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + broadcast_opts.rootTensor = opts.rootTensor + return self.pg.broadcast(tensors, broadcast_opts) + + def _adjust_tensors_list_to_ws1(self, tensors_list): + """ receives list of lists of tensors and returns lists + of list-size-1 to match the world_size=1 + """ + world1_tensors_list = [] + for i, tensors in enumerate(tensors_list): + world1_tensors_list.append(tensors[self.sim_rank:self.sim_rank + 1]) + return world1_tensors_list + + @staticmethod + def _copy_data_from_tensor_to_tensor_list(source_tensors, tensors_list): + """ copy data from source tensors to all tensors in tensor list """ + for i, tensors in enumerate(tensors_list): + for t in tensors: + t.data[:] = source_tensors[i][0].data[:] + + def allgather(self, tensors_list, input_tensors, *kwargs): + world1_tensors_list = self._adjust_tensors_list_to_ws1(tensors_list) + handle = self.pg.allgather(world1_tensors_list, input_tensors, *kwargs) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, tensors_list) + return handle + + def gather(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(output_tensors) + handle = self.pg.gather(world1_tensors_list, input_tensors, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, output_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(input_tensors, broadcast_opts) + + def scatter(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(output_tensors, broadcast_opts) + + def reduce_scatter(self, output_tensors, input_tensors, opts): + world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.reduce_scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + def alltoall(self, output_tensors, input_tensors, _opts): + 
world1_in_tensors_list = input_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list = output_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list[0].data[:] = world1_in_tensors_list[0].data[:] + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + handle = self.pg.allreduce(world1_out_tensors_list, opts) + return handle + + def barrier(self, opts): + opts.device_ids = [self.sim_rank] + return self.pg.barrier(opts) + + # ---------------------------------------------------- + # Create group registered function + # ---------------------------------------------------- + @classmethod + def create(cls, _store, rank, world_size, timeout, backend): + return cls(rank, world_size, timeout, backend) + + +def install_sim_dist_backend(sim_world_size, sim_rank): + + def wrapped_dist_init_process_group(backend, + init_method=None, + timeout=default_pg_timeout, + world_size=-1, + rank=-1, + store=None, + group_name="", + pg_options=None): + assert world_size == -1 or world_size == sim_world_size, \ + f'Inconsistent world_size: sim={sim_world_size} dist_init={world_size}' + + assert rank == -1 or rank == sim_rank, \ + f'Inconsistent rank: sim={sim_rank} dist_init={rank}' + + if backend == 'hccl': + import habana_frameworks.torch.distributed.hccl # noqa: F401 + + # override provided init_method/store with a dummy store + # For debug, it is better to use FileStore: + # import os + # my_store_filename = '/tmp/my_store' + # os.remove(my_store_filename) if os.path.exists(my_store_filename) else None + # os.remove(my_store_filename) + # store = torch.distributed.FileStore(my_store_filename, world_size) + store = torch.distributed.TCPStore(host_name="localhost", + port=12355, + world_size=sim_world_size, + is_master=True, + timeout=timedelta(seconds=300), + wait_for_workers=False) + + # set the simulated world size + SimProcessGroup.WORLD_SIZE = sim_world_size + SimProcessGroup.STORE = store + + # register sim backend + # create_fn = partial(SimProcessGroup.create, backend=default_backend) + create_fn = partial(SimProcessGroup.create, backend=backend) + dist.Backend.register_backend(SimProcessGroup.BACKEND, create_fn) + + # emulate all other world devices has joined the newly created group + SimProcessGroup.store_add_rest_of_world(next_group=True) + + orig_dist_init_process_group(backend=SimProcessGroup.BACKEND, + timeout=timeout, + world_size=sim_world_size, + rank=sim_rank, + store=store, + group_name=group_name, + pg_options=pg_options) + + SimProcessGroup.default_pg().post_create_sim_group() + + def wrapped_dist_new_group(ranks=None, timeout=default_pg_timeout, backend=None, pg_options=None): + SimProcessGroup.store_add_rest_of_world(next_group=True) + pg = orig_dist_new_group(ranks=ranks, timeout=timeout, backend=backend, pg_options=pg_options) + + if pg != GroupMember.NON_GROUP_MEMBER: + if backend is None or backend == SimProcessGroup.BACKEND: + pg.post_create_sim_group() + + return pg + + def wrapped_dist_broadcast_object_list(object_list, src=0, group=None, device=None): + rank = SimProcessGroup.default_pg().sim_rank + if src != sim_rank: + raise RuntimeError(f'SimProcessGroup does not support dist.broadcast_object_list() ' + f'for src={src} different than sim_rank={rank}') + return orig_dist_broadcast_object_list(object_list, src, group, device) + + orig_dist_init_process_group = dist.init_process_group + dist.init_process_group = wrapped_dist_init_process_group + + orig_dist_new_group = dist.new_group + dist.new_group = 
wrapped_dist_new_group + + orig_dist_broadcast_object_list = dist.broadcast_object_list + dist.broadcast_object_list = wrapped_dist_broadcast_object_list diff --git a/deepspeed/tools/pg_sim/ut/base.py b/deepspeed/tools/pg_sim/ut/base.py new file mode 100644 index 000000000000..24889f944070 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/base.py @@ -0,0 +1,311 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +import functools +import torch +import torch.distributed as dist +import pytest + +from pg_sim.pg import (install_sim_dist_backend, GroupMember) + + +class TestBaseWrapper: + """ + BaseTestWrapper class ensures that the test cases encapsulated + in ProcessGroupSimTestBase will only be executed by subclasses. + """ + + class ProcessGroupSimTestBase(unittest.TestCase): + + def setUp(self) -> None: + self.world_size = 8 + self.rank = 0 + self.backend = self.get_backend() + self.device = self.get_device() + + self.assertIsNotNone(self.backend) + self.assertIsNotNone(self.device) + + install_sim_dist_backend(sim_world_size=self.world_size, sim_rank=self.rank) + + dist.init_process_group(backend=self.backend, + init_method=None, + store=None, + rank=self.rank, + world_size=self.world_size) + + def get_backend(self): + self.assertTrue(False, msg='get_backend must be implemented by derived test') + + def get_device(self): + self.assertTrue(False, msg='get_device must be implemented by derived test') + + def _get_row_first_rank(self): + row_ranks = list(set(range(self.world_size)) - {self.rank}) + return row_ranks[0] if row_ranks else None + + @staticmethod + def _get_torch_version(): + return int(torch.__version__.split('.')[1]) + + @pytest.mark.forked + def test_world(self): + res_rank = dist.get_rank() + res_ws = dist.get_world_size() + self.assertEqual(res_rank, self.rank) + self.assertEqual(res_ws, self.world_size) + + @pytest.mark.forked + def test_new_group(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + + pg_1 = dist.new_group(ranks=[self.rank]) + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM, group=pg_1) + self.assertTrue(t.eq(t_in_out).all()) + + row_rank = self._get_row_first_rank() + if row_rank: + pg_2 = dist.new_group(ranks=[row_rank]) + self.assertEqual(pg_2, GroupMember.NON_GROUP_MEMBER) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_impl(self, src): + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=False) + self.assertIsNone(handle) + + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_broadcast_src(self): + self._test_broadcast_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_broadcast_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_object_type_impl(self, src): + if dist.get_rank() == src: + objects = ["foo", 12, {1: 2}] + else: + objects = [None, None, None] + + dev = torch.device(self.device) + dist.broadcast_object_list(objects, src=src, device=dev) + + @pytest.mark.forked + def test_broadcast_object_type_src(self): + self._test_broadcast_object_type_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_object_type_dst(self): + row_rank = self._get_row_first_rank() 
+ if row_rank: + with pytest.raises(RuntimeError): + self._test_broadcast_object_type_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_reduce(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM) + self.assertTrue(t.eq(t_in_out).all()) + + def _test_reduce_impl(self, dst): + t = torch.tensor([1.0, 2.0]).to(self.device) + t_in_out = t.clone() + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=False) + self.assertIsNone(handle) + self.assertTrue(t.eq(t_in_out).all()) + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(t.eq(t_in_out).all()) + + @pytest.mark.forked + def test_reduce_src(self): + self._test_reduce_impl(dst=self.rank) + + @pytest.mark.forked + def test_reduce_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_reduce_impl(dst=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_gather(self): + tensor_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + tensor = torch.ones(2).to(self.device) + + handle = dist.all_gather(tensor_list, tensor, async_op=False) + self.assertIsNone(handle) + self.assertTrue(tensor_list[0].eq(tensor).all()) + + handle = dist.all_gather(tensor_list, tensor, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(tensor_list[0].eq(tensor).all()) + + def _test_gather_impl(self, dst, local_dst): + torch_version = self._get_torch_version() + if (self.backend == 'nccl') and (torch_version <= 10): + self.skipTest(f'Skipping {self._testMethodName} for nccl ' + f'for torch.version={torch_version}') + + tensor = torch.ones(2).to(self.device) + gather_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_dst else None + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=False) + self.assertIsNone(handle) + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + @pytest.mark.forked + def test_gather_src(self): + self._test_gather_impl(dst=self.rank, local_dst=True) + + @pytest.mark.forked + def test_gather_not_src(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_gather_impl(dst=row_rank, local_dst=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_scatter_impl(self, src, local_src): + if self.backend not in ('gloo', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + tensor = torch.ones(2).to(self.device) + scatter_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_src else None + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=False) + self.assertIsNone(handle) + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + @pytest.mark.forked + def test_scatter_src(self): + self._test_scatter_impl(src=self.rank, local_src=True) + + @pytest.mark.forked + def test_scatter_not_src(self): + row_rank = self._get_row_first_rank() + 
if row_rank: + self._test_scatter_impl(src=row_rank, local_src=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_reduce_scatter(self): + if self.backend not in ('nccl', 'hccl'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output = torch.ones(2).to(self.device) + input_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + + handle = dist.reduce_scatter(output, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue(input_list[self.rank].eq(output).all()) + + handle = dist.reduce_scatter(output, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(input_list[self.rank].eq(output).all()) + + @pytest.mark.forked + def test_all_to_all(self): + if self.backend not in ('nccl', 'hccl', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output_list = [torch.zeros(1).to(self.device) for _ in range(self.world_size)] + input_list = list( + torch.arange(self.world_size, dtype=torch.float32).add(1.).to(self.device).chunk(self.world_size)) + + expected_res = [ + torch.zeros(1).to(self.device) if i != self.rank else torch.ones(1).to(self.device) + for i in range(self.world_size) + ] + + handle = dist.all_to_all(output_list, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + handle = dist.all_to_all(output_list, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + @pytest.mark.forked + def test_barrier(self): + handle = dist.barrier(async_op=False) + self.assertIsNone(handle) + + handle = dist.barrier(async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_send(self): + tensor = torch.ones(2).to(self.device) + dist.send(tensor, dst=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.send(tensor, dst=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.isend(tensor, dst=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.isend(tensor, dst=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_recv(self): + tensor = torch.zeros(2).to(self.device) + dist.recv(tensor, src=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.recv(tensor, src=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.irecv(tensor, src=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.irecv(tensor, src=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() diff --git a/deepspeed/tools/pg_sim/ut/gloo.py b/deepspeed/tools/pg_sim/ut/gloo.py new file mode 100644 index 000000000000..347f9b96a741 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/gloo.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestGloo(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestGloo, self).setUp() + + def get_backend(self): + return 'gloo' + + def get_device(self): + return 'cpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/hccl.py b/deepspeed/tools/pg_sim/ut/hccl.py new file mode 100644 index 000000000000..37fabcfb9363 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/hccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestHccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestHccl, self).setUp() + + def get_backend(self): + return 'hccl' + + def get_device(self): + return 'hpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/nccl.py b/deepspeed/tools/pg_sim/ut/nccl.py new file mode 100644 index 000000000000..da6b8f23ed47 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/nccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestNccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestNccl, self).setUp() + + def get_backend(self): + return 'nccl' + + def get_device(self): + return 'cuda' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/tensor_logger/__init__.py b/deepspeed/tools/tensor_logger/__init__.py new file mode 100644 index 000000000000..55ea7227e48d --- /dev/null +++ b/deepspeed/tools/tensor_logger/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .tensor_logger import TensorLogger, save_logged_tensors diff --git a/deepspeed/tools/tensor_logger/tensor_logger.py b/deepspeed/tools/tensor_logger/tensor_logger.py new file mode 100644 index 000000000000..f128b9a6b0a0 --- /dev/null +++ b/deepspeed/tools/tensor_logger/tensor_logger.py @@ -0,0 +1,234 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import collections +from functools import partial +from contextlib import contextmanager +from torch.functional import Tensor +from os import makedirs +from os.path import join + + +class TensorLogger: + """ Records nn.Module's activations and gradients tensors + + Description: + Records up to end_iteration(if 0, recording is disabled) from start_iteration. + If log_activations_enabled, nn.Module's activations are recorded during forward. + If log_grads_enabled, nn.Module's gradients are recorded during back propagation. + If log_inputs_enabled, model inputs are recorded. 
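A tiny self-contained sketch of the hook-based recording this docstring goes on to describe (simplified; the real class also handles gradients, model inputs, and iteration windows):

import collections
import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
records = collections.defaultdict(list)
handles = [
    m.register_forward_hook(
        lambda mod, inp, out, name=name: records[name].append(out.detach().cpu()))
    for name, m in model.named_modules() if name
]
model(torch.randn(2, 4))   # forward activations of each named submodule are captured
for h in handles:
    h.remove()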
+ + Usage: + Integrated within the training loop: + tensor_logger = TensorLogger(model, start_iteration=2, end_iteration=2) + # dumps second iteration only, iteration number starts with 1 + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger.log_iteration(i): + # run forward/backward iteration + + tensor_logger.save(filename) + + Another alternative: + tensor_logger = TensorLogger(model, end_iteration=2) + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger: + tensor_logger.set_iteration(i) + # run forward/backward iteration + + tensor_logger.save(filename) + + Implementation notes: + forward/backward activations/gradients are collected using nn.Module hooks. + However, model inputs are collected by overloading model.forward() method. + Model inputs can't be collected using the hooks since the hooks only provide + inputs and do not provide kwargs, if exist, of the forward method. + """ + + def __init__(self, + model, + start_iteration=0, + end_iteration=0, + log_activations_enabled=False, + log_grads_enabled=False, + log_inputs_enabled=False, + prefix=None): + + # for now, no support for virtual pipeline (interleaved) + if isinstance(model, list): + assert len(model) == 1, 'No support for list of multiple models (len={})'.format(len(model)) + model = model[0] + + self.model = model + self.start_iteration = start_iteration + self.end_iteration = end_iteration + self.log_activations_enabled = log_activations_enabled + self.log_grads_enabled = log_grads_enabled + self.log_inputs_enabled = log_inputs_enabled + self.prefix = 'model' if prefix is None else prefix + + # captured tensors are saved in the following hierarchy: + # { + # iteration: { # iteration number + # tensor_type: { # fwd_act/bwd_grad_in/bwd_grad_out + # name: [tensors] # tensor name's tensors. list is required due to e.g. 
grad accumulation + # } + # } + # } + class IterData(dict): + + def __init__(self): + super(IterData, self).__init__() + self['fwd_act'] = collections.defaultdict(list) + self['bwd_grad_in'] = collections.defaultdict(list) + self['bwd_grad_out'] = collections.defaultdict(list) + self['model_inputs'] = collections.defaultdict(list) + + self.data = collections.defaultdict(IterData) + self.active = False + self.current_iteration = 0 + self.fwd_handles = [] + self.bwd_handles = [] + + def _fqn(self, name): + return '.'.join([self.prefix, name]) if name else self.prefix + + def set_iteration(self, iteration): + self.current_iteration = iteration + + def get_num_recorded_iterations(self): + return len(self.data) + + @contextmanager + def log_iteration(self, iteration): + self.current_iteration = iteration + self._enable() + yield self + self._disable() + + def __enter__(self): + self._enable() + return self + + def __exit__(self): + self._disable() + + def clear(self): + self.data.clear() + + def save(self, filename, do_clear=True): + + def convert_for_pickle(obj): + if isinstance(obj, dict): + return {k: convert_for_pickle(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_for_pickle(e) for e in obj] + elif isinstance(obj, tuple): + return tuple([convert_for_pickle(e) for e in obj]) + else: + if isinstance(obj, Tensor): + return obj.detach().cpu() + else: + return obj + + data = convert_for_pickle(self.data) + torch.save(data, filename) + self.clear() if do_clear else None + + def _enable(self): + if not self.active and self.start_iteration <= self.current_iteration <= self.end_iteration: + self.active = True + self._enable_log_grads() if self.log_grads_enabled else None + self._enable_log_activations() if self.log_activations_enabled else None + self._enable_log_inputs() if self.log_inputs_enabled else None + + def _disable(self): + if self.active: + self.active = False + self._disable_log_grads() + self._disable_log_activations() + self._disable_log_inputs() + + @staticmethod + def _extract_tensors(t): + if t is None: + return None + elif isinstance(t, int): + return torch.tensor(t) + elif isinstance(t, torch.Tensor): + return t.detach().contiguous() + elif isinstance(t, list): + return [TensorLogger._extract_tensors(e) for e in t] + elif isinstance(t, tuple): + return tuple(TensorLogger._extract_tensors(e) for e in t) + elif isinstance(t, dict): + return {k: TensorLogger._extract_tensors(v) for k, v in t.items()} + assert False, 'Unsupported type: {}'.format(type(t)) + + def _save_fwd_activation(self, name, _mod, _inp, out): + fwd_act = self._extract_tensors(out) + self.data[self.current_iteration]['fwd_act'][name].append(fwd_act) + + def _save_bwd_grads(self, name, _mod, grad_input, grad_output): + grad_in = self._extract_tensors(grad_input) + grad_out = self._extract_tensors(grad_output) + self.data[self.current_iteration]['bwd_grad_in'][name].append(grad_in) + self.data[self.current_iteration]['bwd_grad_out'][name].append(grad_out) + + def _save_inputs(self, *inp, **kwargs): + model_inputs = self._extract_tensors(inp) + model_kwargs = self._extract_tensors(kwargs) + self.data[self.current_iteration]['model_inputs']['inputs'].append(model_inputs) + self.data[self.current_iteration]['model_inputs']['kwargs'].append(model_kwargs) + + def _enable_log_grads(self): + #Revert after [SW-69765] is fixed + full_bwd_hook_supported = False + for name, m in self.model.named_modules(): + register_fn = m.register_full_backward_hook if full_bwd_hook_supported else 
m.register_backward_hook + h = register_fn(partial(self._save_bwd_grads, self._fqn(name))) + self.bwd_handles.append(h) + + def _enable_log_activations(self): + for name, m in self.model.named_modules(): + h = m.register_forward_hook(partial(self._save_fwd_activation, self._fqn(name))) + self.fwd_handles.append(h) + + def _enable_log_inputs(self): + + def wrapped_forward(*inputs, **kwargs): + self._save_inputs(*inputs, **kwargs) + return self.model.original_forward__(*inputs, **kwargs) + + self.model.original_forward__ = self.model.forward + self.model.forward = wrapped_forward + + def _disable_log_grads(self): + for h in self.bwd_handles: + h.remove() + self.bwd_handles = [] + + def _disable_log_activations(self): + for h in self.fwd_handles: + h.remove() + self.fwd_handles = [] + + def _disable_log_inputs(self): + if hasattr(self.model, 'original_forward__'): + self.model.forward = self.model.original_forward__ + del self.model.original_forward__ + + +def save_logged_tensors(tensor_logger: TensorLogger, tensor_logger_path, rank_no, iteration=None): + if tensor_logger.get_num_recorded_iterations(): + makedirs(tensor_logger_path, exist_ok=True) + filename = 'tensor_logger_rank_{}'.format(rank_no) + '.pt' + if iteration is not None: + filename = 'tensor_logger_rank_{}_iter_{}'.format(rank_no, iteration) + '.pt' + fullname = join(tensor_logger_path, filename) + tensor_logger.save(fullname) diff --git a/deepspeed/utils/groups.py b/deepspeed/utils/groups.py index c49f4520e16e..381c1f895652 100644 --- a/deepspeed/utils/groups.py +++ b/deepspeed/utils/groups.py @@ -34,6 +34,8 @@ _EXPERT_PARALLEL_GROUP = {} # Expert data parallel group that the current rank belongs to. _EXPERT_DATA_PARALLEL_GROUP = {} +# Expert Parallel combined with Tensor Parallel that the current rank belongs to. +_EXPERT_TENSOR_PARALLEL_GROUP = {} # dist world group needs to be cloned for some cases _WORLD_GROUP = None # ZeRO parameter partitioning group that the current rank belongs to. @@ -251,7 +253,36 @@ def _get_expert_parallel_ranks(world_size, return expert_parallel_groups, expert_data_parallel_groups -def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_before_expert_parallel_=False): +def _get_expert_tensor_parallel_ranks(expert_parallel_groups): + # create a dict from each rank to the ep_group ranks it belongs to + rank_to_ep_group = {} + for ranks in expert_parallel_groups: + for rank in ranks: + rank_to_ep_group[rank] = ranks + + # go over all tensor groups, rank by rank + # for each rank, add the ep_ranks to current tensor group, if not already added + # in order to add ep ranks only once, we delete all rank members from rank_to_ep_group + global expert_tensor_parallel_world_size + world_size = dist.get_world_size() + expert_tensor_parallel_groups = [] + for i in range(world_size // expert_tensor_parallel_world_size): + ep_tp_ranks = [] + for t in range(expert_tensor_parallel_world_size): + rank = i * expert_tensor_parallel_world_size + t + ep_ranks = rank_to_ep_group.get(rank, []) + for r in ep_ranks: + rank_to_ep_group.pop(r) + ep_tp_ranks.extend(ep_ranks) + if ep_tp_ranks: + expert_tensor_parallel_groups.append(sorted(ep_tp_ranks)) + return expert_tensor_parallel_groups + + +def _create_expert_data_and_model_parallel(expert_parallel_size_, + mpu, + use_data_before_expert_parallel_=False, + create_expert_tensor_parallel_group=False): """ Create expert and data parallel groups based on MPU (model parallel) group. 
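A standalone trace of the merge performed by _get_expert_tensor_parallel_ranks above may help. The sketch below reimplements only that grouping step on made-up sizes (world size 4, tensor-parallel width 2, and expert-parallel groups [[0, 2], [1, 3]] are illustrative assumptions, not values from this patch) and does not call into deepspeed.utils.groups.

# Illustration of the ep+tp merge on hypothetical toy inputs.
def merge_ep_into_tp_blocks(expert_parallel_groups, world_size, etp_world_size):
    # map each rank to the expert-parallel group it belongs to
    rank_to_ep_group = {rank: ranks for ranks in expert_parallel_groups for rank in ranks}
    merged = []
    for i in range(world_size // etp_world_size):
        ep_tp_ranks = []
        for t in range(etp_world_size):
            rank = i * etp_world_size + t
            ep_ranks = rank_to_ep_group.get(rank, [])
            for r in ep_ranks:
                # pop every member so each expert-parallel group is absorbed only once
                rank_to_ep_group.pop(r)
            ep_tp_ranks.extend(ep_ranks)
        if ep_tp_ranks:
            merged.append(sorted(ep_tp_ranks))
    return merged

print(merge_ep_into_tp_blocks([[0, 2], [1, 3]], world_size=4, etp_world_size=2))
# -> [[0, 1, 2, 3]]: the tensor-parallel block {0, 1} pulls in both expert-parallel groups.

Each contiguous tensor-parallel block of ranks absorbs every expert-parallel group that intersects it exactly once, which is why the popped entries guard against duplicates; the second block then contributes nothing and no empty group is appended.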
@@ -304,6 +335,18 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_ if rank in list(ranks): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group + if create_expert_tensor_parallel_group: + # calculate ep_tp_groups and validate correct number of groups + expert_tensor_parallel_groups = _get_expert_tensor_parallel_ranks(expert_parallel_groups) + n_ep_tp_groups = world_size // expert_parallel_size_ // expert_tensor_parallel_world_size + assert n_ep_tp_groups == len(expert_tensor_parallel_groups) + + # create groups + for ranks in expert_tensor_parallel_groups: + group = dist.new_group(ranks) + if rank in list(ranks): + _EXPERT_TENSOR_PARALLEL_GROUP[group_name] = group + def _get_max_expert_size(): """Get the maximum ep_size from all the created groups.""" @@ -350,6 +393,18 @@ def _get_expert_data_parallel_group_dict(): return _EXPERT_DATA_PARALLEL_GROUP +def _get_expert_tensor_parallel_group(group_name): + """Get the expert tensor parallel group the caller rank belongs to.""" + assert group_name in _EXPERT_TENSOR_PARALLEL_GROUP, \ + f'expert tensor parallel group is not initialized for {group_name=}' + return _EXPERT_TENSOR_PARALLEL_GROUP[group_name] + + +def _get_expert_tensor_parallel_group_dict(): + """Get the expert tensor parallel group dict.""" + return _EXPERT_TENSOR_PARALLEL_GROUP + + def _clone_world_group(): """Create a clone of the world group Note: We need to clone the dist world group because we diff --git a/deepspeed/utils/logging.py b/deepspeed/utils/logging.py index 1e62d96e1032..97496f7db487 100644 --- a/deepspeed/utils/logging.py +++ b/deepspeed/utils/logging.py @@ -7,6 +7,7 @@ import logging import sys import os +import torch log_levels = { "debug": logging.DEBUG, @@ -19,6 +20,18 @@ class LoggerFactory: + @staticmethod + def logging_decorator(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if torch._dynamo.is_compiling(): + return + else: + return func(*args, **kwargs) + + return wrapper + @staticmethod def create_logger(name=None, level=logging.INFO): """create a logger @@ -44,6 +57,10 @@ def create_logger(name=None, level=logging.INFO): ch.setLevel(level) ch.setFormatter(formatter) logger_.addHandler(ch) + if os.getenv("DISABLE_LOGS_WHILE_COMPILING", "0") == "1": + for method in ['info', 'debug', 'error', 'warning', 'critical', 'exception']: + original_logger = getattr(logger_, method) + setattr(logger_, method, LoggerFactory.logging_decorator(original_logger)) return logger_ diff --git a/op_builder/cpu/builder.py b/op_builder/cpu/builder.py index d881842ad0b1..d2bc8eacfa25 100644 --- a/op_builder/cpu/builder.py +++ b/op_builder/cpu/builder.py @@ -30,11 +30,7 @@ def builder(self): return cpp_ext def cxx_args(self): - args = ['-O3', '-g', '-Wno-reorder'] - CPU_ARCH = self.cpu_arch() - SIMD_WIDTH = self.simd_width() - args += [CPU_ARCH, '-fopenmp', SIMD_WIDTH] - return args + return ['-O3', '-g', '-Wno-reorder'] def libraries_args(self): return [] diff --git a/op_builder/hpu/__init__.py b/op_builder/hpu/__init__.py index 6527ace087b5..5ad1b9a7f891 100644 --- a/op_builder/hpu/__init__.py +++ b/op_builder/hpu/__init__.py @@ -7,4 +7,5 @@ from .cpu_adam import CPUAdamBuilder from .fused_adam import FusedAdamBuilder +from .transformer_inference import InferenceBuilder from .no_impl import NotImplementedBuilder diff --git a/op_builder/hpu/builder.py b/op_builder/hpu/builder.py index c176a586ba49..3c86128fffd6 100644 --- a/op_builder/hpu/builder.py +++ b/op_builder/hpu/builder.py @@ -31,11 +31,7 @@ def builder(self): return cpp_ext 
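Returning to the LoggerFactory change above: wrapping each logger method so it becomes a no-op while torch._dynamo is tracing can be sketched in isolation as below. This is an illustrative reimplementation rather than the patched deepspeed.utils.logging module; the logger name "demo" is arbitrary, and the DISABLE_LOGS_WHILE_COMPILING gate mirrors the environment variable used in the patch.

# Sketch: silence logger calls while torch.compile / dynamo is tracing.
import functools
import logging
import os

import torch


def _skip_while_compiling(func):

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if torch._dynamo.is_compiling():
            return  # drop the record while tracing instead of emitting it
        return func(*args, **kwargs)

    return wrapper


logger = logging.getLogger("demo")
if os.getenv("DISABLE_LOGS_WHILE_COMPILING", "0") == "1":
    for method in ['info', 'debug', 'error', 'warning', 'critical', 'exception']:
        setattr(logger, method, _skip_while_compiling(getattr(logger, method)))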
def cxx_args(self): - args = ['-O3', '-g', '-Wno-reorder'] - CPU_ARCH = self.cpu_arch() - SIMD_WIDTH = self.simd_width() - args += [CPU_ARCH, '-fopenmp', SIMD_WIDTH] - return args + return ['-O3', '-g', '-Wno-reorder'] def libraries_args(self): return [] diff --git a/op_builder/hpu/transformer_inference.py b/op_builder/hpu/transformer_inference.py new file mode 100644 index 000000000000..e397c99200ec --- /dev/null +++ b/op_builder/hpu/transformer_inference.py @@ -0,0 +1,39 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 +import importlib + +# DeepSpeed Team + +try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class InferenceBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE" + NAME = "transformer_inference" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=self.NAME) + + def absolute_name(self): + return f"deepspeed.ops.transformer.inference.{self.NAME}_op" + + def sources(self): + return [] + + def load(self, verbose=True): + if self.name in __class__._loaded_ops: + return __class__._loaded_ops[self.name] + + from deepspeed.git_version_info import installed_ops # noqa: F401 + if installed_ops.get(self.name, False): + op_module = importlib.import_module(self.absolute_name()) + __class__._loaded_ops[self.name] = op_module + return op_module diff --git a/pre-commit-toggle.sh b/pre-commit-toggle.sh new file mode 100755 index 000000000000..c458c7f2d0da --- /dev/null +++ b/pre-commit-toggle.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Path to the pre-commit configuration file +PRE_COMMIT_CONFIG=".pre-commit-config.yaml" + +# Install pre-commit +pip install -r ${DEEPSPEED_FORK_ROOT}/requirements/requirements-dev.txt + +# Enable pre-commit +function enable_pre_commit() { + # Install pre-commit hooks + pre-commit install +} + +# Disable pre-commit +function disable_pre_commit() { + # Install pre-commit hooks + pre-commit uninstall +} + +# Check if the pre-commit configuration file exists +if [ -f "$PRE_COMMIT_CONFIG" ]; then + echo "Pre-commit configuration file found: $PRE_COMMIT_CONFIG" +else + echo "Pre-commit configuration file not found: $PRE_COMMIT_CONFIG" + exit 1 +fi + +# Check the command-line argument to enable or disable pre-commit +if [ "$1" == "enable" ]; then + enable_pre_commit +elif [ "$1" == "disable" ]; then + disable_pre_commit +else + echo "Usage: ./pre-commit-toggle.sh [enable|disable]" + exit 1 +fi diff --git a/requirements/requirements-sparse_attn.txt b/requirements/requirements-sparse_attn.txt index f929bb0168a5..09386fdcb120 100755 --- a/requirements/requirements-sparse_attn.txt +++ b/requirements/requirements-sparse_attn.txt @@ -1 +1 @@ -triton==1.0.0 +triton==2.0.0.dev20221202 diff --git a/tests/conftest.py b/tests/conftest.py index 45e8434a021b..4e5737724e32 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,14 @@ from os.path import abspath, dirname, join import torch import warnings +from unit.ci_promote_marker import * +from unit.xfail_marker import * +from unit.skip_marker import * +from unit.compile_marker import * +from unit.a100_marker import * +from unit.util import get_hpu_dev_version 
+from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled # Set this environment variable for the T5 inference unittest(s) (e.g. google/t5-v1_1-small) os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' @@ -70,6 +78,100 @@ def pytest_runtest_call(item): item.runtest = lambda: True # Dummy function so test is not run twice +def pytest_collection_modifyitems(items, config): + device = get_accelerator().device_name() + gaudi_dev = get_hpu_dev_version() + hpu_lazy_mode = hpu_lazy_enabled() + # Add comipile, CI and Promote marker + marker_expression = config.getoption("-m") + # This is to handle the case where marker is already present and compile marker is added. to avoid running of compile tests in other markers when not specified + if marker_expression not in ["compile_1c", "compile_4c"]: + deselected = [] + remaining_items = [] + for item in items: + if item._nodeid in compile_tests_4c or item._nodeid in compile_tests_1c: + deselected.append(item) + continue + remaining_items.append(item) + items[:] = remaining_items # Only tests with 'compile_mode' False remain + config.hook.pytest_deselected(items=deselected) + for item in items: + if item._nodeid in compile_tests_4c: + item._pyfuncitem.add_marker(pytest.mark.compile_4c) + if item._nodeid in compile_tests_1c: + item._pyfuncitem.add_marker(pytest.mark.compile_1c) + if device != 'hpu': + if item._nodeid in a100_tests: + item._pyfuncitem.add_marker(pytest.mark.a100) + if item._nodeid in hpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci) + if item._nodeid in hpu_ci_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci_4cards) + if item._nodeid in gpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_ci) + if item._nodeid in hpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote) + if item._nodeid in hpu_promote_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote_4cards) + if item._nodeid in gpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_promote) + + # Add xfail and SKIP marker + item.user_properties.append(("module_name", item.module.__name__)) + if device == 'hpu': + # Lazy Run + if hpu_lazy_mode: + if item._nodeid in hpu_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_lazy_xfail_tests[item._nodeid])) + if item._nodeid in hpu_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g1_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g2_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi3": + if item._nodeid in g3_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g3_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g3_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g3_lazy_skip_tests[item._nodeid])) + # Eager Run + else: + if item._nodeid in hpu_eager_xfail_tests.keys(): + 
item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_eager_xfail_tests[item._nodeid])) + if item._nodeid in hpu_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_eager_xfail_tests[item._nodeid])) + if item._nodeid in g1_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_eager_xfail_tests[item._nodeid])) + if item._nodeid in g2_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi3": + if item._nodeid in g3_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g3_eager_xfail_tests[item._nodeid])) + if item._nodeid in g3_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g3_eager_skip_tests[item._nodeid])) + else: + if item._nodeid in gpu_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=gpu_xfail_tests[item._nodeid])) + if item._nodeid in gpu_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=gpu_skip_tests[item._nodeid])) + for marker in item.own_markers: + if marker.name in ['skip', 'xfail']: + if 'reason' in marker.kwargs: + item.user_properties.append(("message", marker.kwargs['reason'])) + + # We allow DistributedTest to reuse distributed environments. When the last # test for a class is run, we want to make sure those distributed environments # are destroyed. 
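The collection hook above drives xfail/skip selection purely from dictionaries keyed by pytest node ids. The fragment below sketches that pattern in isolation; the node ids and reasons are invented placeholders, and it uses the public item.add_marker/item.nodeid API rather than the private attributes accessed in conftest.py.

# Sketch of node-id-driven marking inside pytest_collection_modifyitems
# (hypothetical tables; the real ones live in unit/xfail_marker.py and friends).
import pytest

DEMO_XFAIL = {
    "unit/some_dir/test_demo.py::TestDemo::test_case": "known accuracy gap on this device",
}
DEMO_SKIP = {
    "unit/some_dir/test_demo.py::TestDemo::test_unsupported": "op not supported here",
}


def pytest_collection_modifyitems(items, config):
    for item in items:
        if item.nodeid in DEMO_XFAIL:
            item.add_marker(pytest.mark.xfail(reason=DEMO_XFAIL[item.nodeid]))
        elif item.nodeid in DEMO_SKIP:
            item.add_marker(pytest.mark.skip(reason=DEMO_SKIP[item.nodeid]))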
@@ -85,3 +187,11 @@ def pytest_fixture_setup(fixturedef, request): if getattr(fixturedef.func, "is_dist_fixture", False): dist_fixture_class = fixturedef.func() dist_fixture_class(request) + + +def pytest_runtest_makereport(item, call): + if call.when == 'call': + if call.excinfo: + if not (any('message' in prop for prop in item.user_properties)): + if call.excinfo.value: + item.user_properties.append(("message", call.excinfo.value)) diff --git a/tests/pytest.ini b/tests/pytest.ini index f841c47afc0c..bbd4a555bcd3 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation" +addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation and not compile_4c and not compile_1c and not a100" markers = sequential:Tests that need to be run sequentially inference:Inference model tests @@ -11,3 +11,11 @@ markers = world_size:Change world size of individual tests in a class stable_diffusion:Tests that run Stable Diffusion evaluation:Tests that evaluate model correctness + compile: torch.compile tests + hpu_ci: hpu CI tests + hpu_ci_4cards: HPU CI with 4cards + hpu_promote: HPU Promote tests + hpu_promote_4cards: HPU Promote with 4cards + gpu_ci: GPU CI tests + gpu_promote: GPU Promote tests + a100: Run Unsupported titan-xp tests on a100 diff --git a/tests/unit/a100_marker.py b/tests/unit/a100_marker.py new file mode 100644 index 000000000000..0431f8987d06 --- /dev/null +++ b/tests/unit/a100_marker.py @@ -0,0 +1,84 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +a100_tests = [ + "unit/runtime/half_precision/test_bf16.py::TestZero2ReduceScatterOff::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyPartition::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-fp32]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[fp16-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-fp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/runtime/half_precision/test_bf16.py::TestAdamBF16ZeroOneCycleCompatibility::test", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype0]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero3]", + 
"unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]", + "unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py::TestSparseAdam::test", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config0]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config2]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config0]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config1]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-False]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[nvme-nvme]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[cpu-nvme]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[cpu-cpu]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype1]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype2]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype2]", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype1]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-local-False]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-local-True]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-full-False]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-full-True]", +] diff --git a/tests/unit/alexnet_model.py b/tests/unit/alexnet_model.py index 25256d376eeb..94c818b69192 100644 --- a/tests/unit/alexnet_model.py +++ b/tests/unit/alexnet_model.py @@ -100,12 +100,24 @@ def cifar_trainset(fp16=False): dist.barrier() if local_rank != 0: dist.barrier() - - data_root = os.getenv("TEST_DATA_DIR", "/tmp/") - trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), - train=True, - download=True, - transform=transform) + if os.getenv("CIFAR10_OFFLINE", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=False, + transform=transform) + elif os.getenv("STORE_CIFAR10", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=True, + transform=transform) + else: + data_root = os.getenv("TEST_DATA_DIR", "/tmp/") + trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), + train=True, + download=True, + transform=transform) if local_rank == 0: dist.barrier() return trainset diff --git a/tests/unit/checkpoint/common.py b/tests/unit/checkpoint/common.py index 3fb13b214ea0..957dd54fd826 100644 --- a/tests/unit/checkpoint/common.py +++ b/tests/unit/checkpoint/common.py @@ -17,6 +17,7 @@ from unit.common import preferred_dtype from unit.simple_model import * from unittest.mock import MagicMock, patch +from unit.util import hpu_lazy_enabled def compare_deepspeed_states(saved_model, loaded_model): @@ -155,6 +156,8 @@ def create_moe_param_groups(model): def create_deepspeed_model(config_dict, model, base_optimizer): + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) ds_model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=create_moe_param_groups(model), @@ -174,11 +177,14 @@ def checkpoint_correctness_verification(config_dict, empty_tag=False, seq_dataloader=False, load_module_only=False, - dtype=None): + dtype=None, + compile_mode=False): if dtype == None: dtype = preferred_dtype() ds_model = create_deepspeed_model(config_dict=config_dict, model=models[0], base_optimizer=base_optimizers[0]) + if compile_mode: + ds_model.compile() if seq_dataloader: data_loader = sequence_dataloader(model=ds_model, diff --git a/tests/unit/checkpoint/test_latest_checkpoint.py b/tests/unit/checkpoint/test_latest_checkpoint.py index 5d795c4dadcf..cf9d6976d712 100644 --- a/tests/unit/checkpoint/test_latest_checkpoint.py +++ b/tests/unit/checkpoint/test_latest_checkpoint.py @@ -19,7 +19,8 @@ class TestLatestCheckpoint(DistributedTest): world_size = 1 - def test_existing_latest(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, 
False]) + def test_existing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -39,9 +40,11 @@ def test_existing_latest(self, tmpdir): load_optimizer_states=True, load_lr_scheduler_states=False, empty_tag=True, - dtype=torch.float) + dtype=torch.float, + compile_mode=compile_mode) - def test_missing_latest(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_missing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -55,5 +58,7 @@ def test_missing_latest(self, tmpdir): hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() # should be no-op, since latest doesn't exist model.load_checkpoint(tmpdir) diff --git a/tests/unit/checkpoint/test_lr_scheduler.py b/tests/unit/checkpoint/test_lr_scheduler.py index 89c4dd1b49f7..b7b64656c543 100644 --- a/tests/unit/checkpoint/test_lr_scheduler.py +++ b/tests/unit/checkpoint/test_lr_scheduler.py @@ -15,12 +15,13 @@ import pytest +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload', [(0, False), (1, False), (2, False), (2, True), (3, False), (3, True)]) class TestLRSchedulerCheckpoint(DistributedTest): world_size = 2 - def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") if get_accelerator().device_name() == 'cpu': @@ -70,9 +71,10 @@ def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=True) + load_lr_scheduler_states=True, + compile_mode=compile_mode) - def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") if get_accelerator().device_name() == 'cpu': @@ -117,4 +119,5 @@ def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=False) + load_lr_scheduler_states=False, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_moe_checkpoint.py b/tests/unit/checkpoint/test_moe_checkpoint.py index 89878b5d8fa9..3f85d64d8a52 100644 --- a/tests/unit/checkpoint/test_moe_checkpoint.py +++ b/tests/unit/checkpoint/test_moe_checkpoint.py @@ -10,6 +10,8 @@ from unit.simple_model import * from unit.checkpoint.common import checkpoint_correctness_verification +from unit.util import hpu_lazy_enabled +from deepspeed.accelerator import get_accelerator import pytest @@ -38,8 +40,9 @@ def test_checkpoint_moe(self, tmpdir, ep_size): seq_dataloader=True, dtype=torch.float16) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("ep_size, load_optim_states", [(4, True), (4, False), (2, True), (2, False)]) - def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): + def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states, compile_mode): if not required_torch_version(min_version=1.8): pytest.skip("DeepSpeed 
MoE tests need torch 1.8 or higher to run correctly") @@ -66,6 +69,9 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): hidden_dim = 16 models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] # param group must have a random unique name (for now) # TODO: clean-up this requirement, the unique name should not be required here param_groups = [{'params': [p for p in model.parameters()], 'name': 'random-unique-name'} for model in models] @@ -80,4 +86,5 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): empty_tag=True, base_optimizers=optimizers, seq_dataloader=True, - dtype=torch.float16) + dtype=torch.float16, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_other_optimizer.py b/tests/unit/checkpoint/test_other_optimizer.py index bcff7f5e3072..7cb8c0603bc8 100644 --- a/tests/unit/checkpoint/test_other_optimizer.py +++ b/tests/unit/checkpoint/test_other_optimizer.py @@ -18,7 +18,8 @@ class TestOtherOptimizerCheckpoint(DistributedTest): world_size = 2 @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") - def test_checkpoint_unfused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unfused_optimizer(self, tmpdir, compile_mode): #if not get_accelerator().is_fp16_supported(): # pytest.skip("fp16 is not supported") config_dict = { @@ -62,16 +63,19 @@ def test_checkpoint_unfused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + compile_mode=compile_mode) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + compile_mode=compile_mode) - def test_checkpoint_fused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fused_optimizer(self, tmpdir, compile_mode): if get_accelerator().device_name() == "cpu": pytest.skip("CPU accelerator does not support this test") config_dict = { @@ -101,16 +105,19 @@ def test_checkpoint_fused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + compile_mode=compile_mode) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + compile_mode=compile_mode) - def test_checkpoint_fp32_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fp32_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -135,4 +142,5 @@ def test_checkpoint_fp32_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - dtype=torch.float32) + dtype=torch.float32, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_pipeline.py b/tests/unit/checkpoint/test_pipeline.py index c6c228ccada7..c90f5dbe1cf6 100644 --- a/tests/unit/checkpoint/test_pipeline.py +++ b/tests/unit/checkpoint/test_pipeline.py @@ -15,8 +15,9 @@ class TestPipelineCheckpoint(DistributedTest): world_size = 4 + @pytest.mark.parametrize('compile_mode', [True, 
False]) @pytest.mark.parametrize("zero_stage", [0, 1]) - def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): + def test_checkpoint_pipe_engine(self, zero_stage, tmpdir, compile_mode): skip_on_arch(min_arch=7) config_dict = { @@ -61,7 +62,8 @@ def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): load_optimizer_states=True, load_lr_scheduler_states=True, train_batch=True, - dtype=torch.float16 if zero_stage > 0 else torch.float32) + dtype=torch.float16 if zero_stage > 0 else torch.float32, + compile_mode=compile_mode) @pytest.mark.parametrize( "base_topo,test_topo", diff --git a/tests/unit/checkpoint/test_shared_weights.py b/tests/unit/checkpoint/test_shared_weights.py index ed69073fb81c..d3e0db81af6f 100644 --- a/tests/unit/checkpoint/test_shared_weights.py +++ b/tests/unit/checkpoint/test_shared_weights.py @@ -7,8 +7,11 @@ import torch.nn as nn import deepspeed +import pytest from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint from unit.common import DistributedTest +from unit.util import hpu_lazy_enabled +from deepspeed.accelerator import get_accelerator class ModelWithSharedWeights(nn.Module): @@ -25,7 +28,8 @@ def __init__(self): class TestCheckpointSharedWeights(DistributedTest): world_size = 2 - def test_checkpoint_shared_weights(self, tmp_path): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_shared_weights(self, tmp_path, compile_mode): config = { "train_micro_batch_size_per_gpu": 2, "zero_allow_untested_optimizer": True, @@ -34,6 +38,9 @@ def test_checkpoint_shared_weights(self, tmp_path): }, } model = ModelWithSharedWeights() + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) deepspeed_engine, _, _, _ = deepspeed.initialize( @@ -41,6 +48,9 @@ def test_checkpoint_shared_weights(self, tmp_path): model=model, optimizer=optimizer, ) + if compile_mode: + deepspeed_engine.compile() + filename = tmp_path / "checkpoint.pt" deepspeed_engine.save_checkpoint(filename, tag="checkpoint") diff --git a/tests/unit/checkpoint/test_sparse.py b/tests/unit/checkpoint/test_sparse.py index 19fbcd81e473..e2f0e1dc079b 100644 --- a/tests/unit/checkpoint/test_sparse.py +++ b/tests/unit/checkpoint/test_sparse.py @@ -24,8 +24,9 @@ class TestSparseCheckpoint(DistributedTest): [True, False], [True, True], ]) + @pytest.mark.parametrize('compile_mode', [True, False]) def test_non_strict_load_sparse(self, tmpdir, to_save_model_has_embedding, to_save_model_sparse, - destination_has_embedding, destination_sparse): + destination_has_embedding, destination_sparse, compile_mode): class ModelNoEmbedding(torch.nn.Module): @@ -66,6 +67,10 @@ def forward(self, x, offsets): "sparse_gradients": destination_sparse }) + if compile_mode: + engine_to_save.compile() + engine_destination.compile() + save_folder = os.path.join(tmpdir, 'saved_checkpoint') save_tag = '1' diff --git a/tests/unit/checkpoint/test_tag_validation.py b/tests/unit/checkpoint/test_tag_validation.py index b164c31e52b0..edbc42dcadf4 100644 --- a/tests/unit/checkpoint/test_tag_validation.py +++ b/tests/unit/checkpoint/test_tag_validation.py @@ -14,8 +14,9 @@ class TestCheckpointValidationTag(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('valid_mode', ["FAIL", "WARN", "IGNORE"]) - def test_checkpoint_unique_tag(self, tmpdir, valid_mode): + def test_checkpoint_unique_tag(self, tmpdir, valid_mode, compile_mode): config_dict = { 
"train_batch_size": 2, "steps_per_print": 1, @@ -33,13 +34,16 @@ def test_checkpoint_unique_tag(self, tmpdir, valid_mode): model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() if valid_mode == "FAIL": with pytest.raises(AssertionError): model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}") else: model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}") - def test_checkpoint_unknown_tag_validation(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unknown_tag_validation(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, @@ -60,3 +64,5 @@ def test_checkpoint_unknown_tag_validation(self, tmpdir): with pytest.raises(deepspeed.DeepSpeedConfigError): model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() diff --git a/tests/unit/checkpoint/test_universal_checkpoint.py b/tests/unit/checkpoint/test_universal_checkpoint.py index e0c4f4745043..a6b7a402e559 100644 --- a/tests/unit/checkpoint/test_universal_checkpoint.py +++ b/tests/unit/checkpoint/test_universal_checkpoint.py @@ -13,9 +13,10 @@ from unit.common import DistributedTest, DistributedFixture from unit.simple_model import * -from unit.util import bf16_required_version_check +from unit.util import bf16_required_version_check, hpu_lazy_enabled from unit.checkpoint.common import compare_opt_state_dicts, compare_state_dicts +from deepspeed.accelerator import get_accelerator import pytest import deepspeed.comm as dist @@ -79,6 +80,9 @@ def train_save_convert(ds_config, hidden_dim, load_optim, use_torch_adam, dtype, test_step = 8 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model = model.to(device) model = init_ds_engine(model, ds_config, use_torch_adam) data_loader = random_dataloader(model=model, total_samples=test_step, @@ -162,9 +166,10 @@ class baseline_ws4(_baseline): @pytest.mark.parametrize("zero_stage", [1]) @pytest.mark.parametrize("use_torch_adam", [False, True]) @pytest.mark.parametrize("load_optim", [False, True]) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROUniversalCheckpointDP(DistributedTest): - def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam): + def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode): if dtype == torch.bfloat16 and not bf16_required_version_check(): pytest.skip( " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" @@ -175,7 +180,14 @@ def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam): ds_config["checkpoint"] = {"load_universal": True} univ_model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + univ_model = univ_model.to(device) + univ_model = init_ds_engine(univ_model, ds_config, use_torch_adam) + if compile_mode: + univ_model.compile() + univ_model.load_checkpoint(tmpdir, tag=f"{CP_TAG}_universal", load_optimizer_states=load_optim) model_state = univ_model.state_dict() @@ -203,13 +215,16 @@ def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam): univ_model.step() @pytest.mark.world_size(2) - def test_dp_world_size_2to2(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam): - self._run_test(tmpdir, 
dtype, ds_config, load_optim, use_torch_adam) + def test_dp_world_size_2to2(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) @pytest.mark.world_size(2) - def test_dp_world_size_4to2(self, baseline_ws4, tmpdir, dtype, ds_config, load_optim, use_torch_adam): - self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam) + def test_dp_world_size_4to2(self, baseline_ws4, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) @pytest.mark.world_size(4) - def test_dp_world_size_2to4(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam): - self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam) + def test_dp_world_size_2to4(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) diff --git a/tests/unit/checkpoint/test_zero_optimizer.py b/tests/unit/checkpoint/test_zero_optimizer.py index 84b4eca6e2ca..1e5587a02078 100644 --- a/tests/unit/checkpoint/test_zero_optimizer.py +++ b/tests/unit/checkpoint/test_zero_optimizer.py @@ -12,6 +12,7 @@ from unit.common import DistributedTest, DistributedFixture from unit.simple_model import * +from unit.util import hpu_lazy_enabled from unit.checkpoint.common import * @@ -21,8 +22,9 @@ class TestZeROCheckpoint(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [3]) - def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): + def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -41,14 +43,19 @@ def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_module_only=True, + compile_mode=compile_mode) - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) - + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, 'Adam'), (2, False, 'Adam'), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -81,14 +88,23 @@ def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_op models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=True, + compile_mode=compile_mode) + @pytest.mark.parametrize('compile_mode', [True, 
False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, "Adam"), (2, False, "Adam"), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -122,11 +138,20 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, ada models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=False, + compile_mode=compile_mode) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_hybrid_optimizer_state(self, tmpdir, zero_stage): + def test_hybrid_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, @@ -142,6 +167,9 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): config_dict["bf16"] = {"enabled": True} hidden_dim = 10 models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] optimizers = [HybridStateOptimizer(model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, @@ -149,10 +177,12 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): base_optimizers=optimizers, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + compile_mode=compile_mode) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_load_module_only(self, tmpdir, zero_stage): + def test_load_module_only(self, tmpdir, zero_stage, compile_mode): if zero_stage == 0 and get_accelerator().device_name() == "cpu": pytest.skip("CPU Accelerator does not support this test") config_dict = { @@ -175,8 +205,16 @@ def test_load_module_only(self, tmpdir, zero_stage): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_module_only=True, + compile_mode=compile_mode) class ws4_model_checkpoint(DistributedFixture): @@ -212,13 +250,14 @@ def run(self, class_tmpdir, elastic_save, load_optim): model.save_checkpoint(class_tmpdir) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("elastic_save", [True, False]) @pytest.mark.parametrize("elastic_load", [True, False]) @pytest.mark.parametrize("load_optim", [True, False]) class TestZeROElasticCheckpoint(DistributedTest): 
world_size = 2 - def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim): + def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim, compile_mode): ds_config = { "train_batch_size": 2, "optimizer": { @@ -243,6 +282,8 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l model, _, _, _ = deepspeed.initialize(config=ds_config, model=models[0], model_parameters=models[0].parameters()) + if compile_mode: + model.compile() run_steps = 8 data_loader = random_dataloader(model=model, total_samples=run_steps, @@ -261,6 +302,8 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l model, _, _, _ = deepspeed.initialize(config=ds_config, model=models[1], model_parameters=models[1].parameters()) + if compile_mode: + model.compile() model.load_checkpoint(tmpdir, load_optimizer_states=load_optim) if load_optim: @@ -275,7 +318,7 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l model.step() def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, elastic_save, elastic_load, - load_optim): + load_optim, compile_mode): ds_config = { "train_batch_size": 4, "optimizer": { @@ -295,6 +338,8 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, # Load checkpoint with dp world size = 2 model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() if load_optim: with pytest.raises(deepspeed.runtime.zero.utils.ZeRORuntimeException): model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim) @@ -302,11 +347,12 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROSaveLoadEdgeCase(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_immediate_save_load(self, tmpdir, zero_stage): + def test_immediate_save_load(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 4, "optimizer": { @@ -324,6 +370,8 @@ def test_immediate_save_load(self, tmpdir, zero_stage): model = SimpleModel(hidden_dim) ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() ds_model.save_checkpoint(tmpdir) ds_model.load_checkpoint(tmpdir, load_optimizer_states=False, @@ -331,7 +379,7 @@ def test_immediate_save_load(self, tmpdir, zero_stage): load_module_only=False) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_load_immediate_save(self, tmpdir, zero_stage): + def test_load_immediate_save(self, tmpdir, zero_stage, compile_mode): if zero_stage == 0 and get_accelerator().device_name() == "cpu": pytest.skip("CPU Accelerator does not support this test") config_dict = { @@ -352,6 +400,8 @@ def test_load_immediate_save(self, tmpdir, zero_stage): # 1. pretrain a model and save it ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() data_loader = random_dataloader(model=ds_model, total_samples=1, hidden_dim=hidden_dim, device=ds_model.device) for _, batch in enumerate(data_loader): loss = ds_model(batch[0], batch[1]) @@ -363,6 +413,8 @@ def test_load_immediate_save(self, tmpdir, zero_stage): # 2. 
load and immediately save a model with a fresh ds engine ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() ds_model.load_checkpoint(tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False, @@ -370,7 +422,7 @@ def test_load_immediate_save(self, tmpdir, zero_stage): ds_model.save_checkpoint(tmpdir) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): + def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage, compile_mode): config_dict = { "optimizer": { "type": 'Adam' @@ -395,6 +447,8 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): # So we config grad_accum=2 and step only once and save_16bit_model ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) + if compile_mode: + ds_model.compile() data_loader = random_dataloader(model=ds_model, total_samples=2, hidden_dim=hidden_dim, device=ds_model.device) batch = next(iter(data_loader)) @@ -411,11 +465,12 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): ds_model.save_checkpoint(tmpdir) +@pytest.mark.parametrize('compile_mode', [True, False]) class TestZeROCheckpointFrozenWeights(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_load_optimizer_state(self, tmpdir, zero_stage): + def test_load_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, @@ -442,11 +497,19 @@ def test_load_optimizer_state(self, tmpdir, zero_stage): with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=True, + compile_mode=compile_mode) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_not_load_optimizer_state(self, tmpdir, zero_stage): + def test_not_load_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, @@ -472,11 +535,19 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage): with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_optimizer_states=False, + compile_mode=compile_mode) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_load_module_only(self, tmpdir, zero_stage): + def test_load_module_only(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -494,11 +565,19 @@ def test_load_module_only(self, tmpdir, zero_stage): with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, 
tmpdir, load_module_only=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_module_only=True, + compile_mode=compile_mode) @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): + def test_save_exclude_frozen_weights(self, tmpdir, zero_stage, compile_mode): world_size = 1 config_dict = { "train_micro_batch_size_per_gpu": 1, @@ -518,6 +597,8 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): model = SimpleFrozenModel(hidden_dim, empty_grad=False) ds_engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) + if compile_mode: + ds_engine.compile() # Validate backwards-compatibility of including frozen parameters in checkpoint all_ckpt_folder = os.path.join(tmpdir, 'all_params') @@ -546,7 +627,7 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage): assert loaded_trainable_param_names == trainable_param_names @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): + def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage, compile_mode): world_size = 1 config_dict = { "train_micro_batch_size_per_gpu": 1, @@ -566,6 +647,8 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): model = SimpleFrozenModel(hidden_dim, empty_grad=False) ds_engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) + if compile_mode: + ds_engine.compile() # Validate custom state_dict model state_dict_bk = model.state_dict @@ -590,9 +673,10 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage): class TestSaveTensorClone(DistributedTest): world_size = 1 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2]) @pytest.mark.parametrize('use_cpu_device', [True, False]) - def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device): + def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device, compile_mode): ds_config = { "optimizer": { @@ -609,6 +693,8 @@ def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device): ref_model_state_dict = model.state_dict() ds_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config) + if compile_mode: + ds_engine.compile() clone_device = torch.device('cpu') if use_cpu_device else get_accelerator().current_device() clone_state_dict = clone_tensors_for_torch_save(ds_engine.module.state_dict()) compare_state_dicts(ref_model_state_dict, clone_state_dict) @@ -625,8 +711,9 @@ class TestZeRONonDistributed(DistributedTest): world_size = 1 init_distributed = False + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) - def test_chmod_exception_handling(self, monkeypatch, zero_stage): + def test_chmod_exception_handling(self, monkeypatch, zero_stage, compile_mode): config_dict = { "optimizer": { @@ -644,6 +731,8 @@ def test_chmod_exception_handling(self, monkeypatch, zero_stage): model=net, model_parameters=net.parameters()) + if compile_mode: + engine.compile() log_called = False def mock_logger_info(message, *args, **kwargs): diff --git a/tests/unit/ci_promote_marker.py b/tests/unit/ci_promote_marker.py new file mode 100644 index 000000000000..7bd83195723a --- /dev/null +++ b/tests/unit/ci_promote_marker.py @@ -0,0 +1,606 @@ +# Copyright (c) 2023 Habana Labs, Ltd. 
an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + 
"unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]" +] + +hpu_ci_tests_4cards = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + 
"unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + 
"unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]", + "unit/comm/test_dist.py::TestDistInitNoEnv::test", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_throughput_calculation", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-fp32-zero1]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1]", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False]", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config0]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]", + "unit/runtime/half_precision/test_fp16.py::TestFP16OptimizerForMoE::test_unfused_gradnorm", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-350m-fp16]" +] + +hpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + 
"unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + 
"unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] + +hpu_promote_tests_4cards = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + 
"unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-fp16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero2]", + 
"unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-None]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config2]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-125m-fp16]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-neo-125M-fp16]" +] + +gpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + 
"unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", 
+ "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-noCG]" +] + +gpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + 
"unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + 
"unit/compression/test_compression.py::TestCompression::test_conv1d_convertion", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] diff --git a/tests/unit/common.py b/tests/unit/common.py index 1774bcfae9ff..966acfa6145b 100644 --- a/tests/unit/common.py +++ b/tests/unit/common.py @@ -23,7 +23,7 @@ from _pytest.fixtures import FixtureLookupError, FixtureFunctionMarker # Worker timeout for tests that hang -DEEPSPEED_TEST_TIMEOUT = int(os.environ.get('DS_UNITTEST_TIMEOUT', '600')) +DEEPSPEED_TEST_TIMEOUT = int(os.environ.get('DEEPSPEED_TEST_TIMEOUT', '600')) def is_rocm_pytorch(): @@ -93,6 +93,15 @@ def set_accelerator_visible(): elif get_accelerator().device_name() == 'npu': npu_smi = subprocess.check_output(['npu-smi', 'info', '-l']) num_accelerators = int(npu_smi.decode('utf-8').strip().split('\n')[0].split(':')[1].strip()) + elif get_accelerator().device_name() == 'hpu': + try: + hl_smi = subprocess.check_output(['hl-smi', "-L"]) + num_accelerators = re.findall(r"Module ID\s+:\s+(\d+)", hl_smi.decode()) + except FileNotFoundError: + sim_list = subprocess.check_output(['ls', '-1', '/dev/accel']) + num_accelerators = re.findall(r"accel(\d+)", sim_list.decode()) + num_accelerators = sorted(num_accelerators, key=int) + os.environ["HABANA_VISIBLE_MODULES"] = ",".join(num_accelerators) else: assert get_accelerator().device_name() == 'cpu' cpu_sockets = int( @@ -450,7 +459,7 @@ def __call__(self, request): world_size = mark.args[0] break else: - world_size = self.world_size + world_size = self._fixture_kwargs.get("world_size", self.world_size) if isinstance(world_size, int): world_size = [world_size] diff --git a/tests/unit/compile_marker.py b/tests/unit/compile_marker.py new file mode 100644 index 000000000000..45ebb2f6b461 --- /dev/null +++ b/tests/unit/compile_marker.py @@ -0,0 +1,603 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +compile_tests_4c = [ + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype0]", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + 
"unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unknown_tag_validation[True]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype0]", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype1]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-True-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-True-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-True-False]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-125m-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-neo-125M-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-350m-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-bloom-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j-True]", + 
"unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neox-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neox-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-j-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-codegen-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-codegen-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-codegen-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-codegen-True]", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-roberta-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-t5-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]", +] +compile_tests_1c = [ + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-Triton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-noTriton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-Triton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-Triton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-Triton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-Triton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-Triton-True-True]", +] diff --git a/tests/unit/inference/test_checkpoint_sharding.py b/tests/unit/inference/test_checkpoint_sharding.py index 5bae9a151a27..1466be17e134 100644 --- a/tests/unit/inference/test_checkpoint_sharding.py +++ b/tests/unit/inference/test_checkpoint_sharding.py @@ -14,6 +14,7 @@ from huggingface_hub import snapshot_download from transformers.utils import is_offline_mode from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.accelerator import get_accelerator if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -44,6 +45,8 @@ def model_name(request): @pytest.fixture(params=[torch.float16, torch.int8], ids=["fp16", "int8"]) def dtype(request): + if request.param not in get_accelerator().supported_dtypes(): + pytest.skip(f"{request.param} not supported by {get_accelerator().device_name()}.") return request.param @@ -73,7 +76,9 @@ def run(self, model_name, class_tmpdir): class TestCheckpointShard(DistributedTest): world_size = 2 - def test(self, model_name, dtype, class_tmpdir, save_shard): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test(self, model_name, dtype, class_tmpdir, save_shard, compile_mode): + world_size = int(os.getenv("WORLD_SIZE", "1")) inf_config = { "replace_with_kernel_inject": True, @@ -92,6 +97,8 @@ def test(self, model_name, dtype, class_tmpdir, save_shard): model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16) model = model.eval() model = deepspeed.init_inference(model, config=inf_config) + if compile_mode: + model.compile() check_dtype(model, dtype) @@ -99,7 +106,8 @@ def test(self, model_name, dtype, class_tmpdir, save_shard): class TestCheckpointShardinAutoTP(DistributedTest): world_size = 2 - def test(self, model_name, class_tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test(self, model_name, class_tmpdir, compile_mode): def write_checkpoints_json(model_name, class_tmpdir): import json @@ -137,3 +145,5 @@ def write_checkpoints_json(model_name, class_tmpdir): model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16) model = model.eval() model = deepspeed.init_inference(model, config=inf_config) + if compile_mode: + model.compile() diff --git a/tests/unit/inference/test_inference.py b/tests/unit/inference/test_inference.py index 36003319856c..f996240309ab 100644 --- a/tests/unit/inference/test_inference.py +++ 
b/tests/unit/inference/test_inference.py @@ -30,6 +30,7 @@ from deepspeed.ops.op_builder import OpBuilder from unit.common import DistributedTest +from transformers import BertLayer rocm_version = OpBuilder.installed_rocm_version() if rocm_version != (0, 0): @@ -67,6 +68,36 @@ "facebook/opt-125m", # 125m, 1.7B, ..., 175B variants have the same model architecture. "facebook/opt-350m", # 350m applies layer norm after attention layer which is different than other variants. ] +ModelsInjectionPolicyMap = { + "distilbert/distilbert-base-cased-distilled-squad": { + BertLayer: ("output_layer_norm", ) + }, + "openai-community/gpt2": { + BertLayer: ("mlp", ) + }, + "distilbert/distilgpt2": { + BertLayer: ("mlp", ) + }, + "Norod78/hebrew-bad_wiki-gpt_neo-tiny": { + BertLayer: ("out_proj", ) + }, + "EleutherAI/gpt-j-6b": { + BertLayer: ("mlp", ) + }, + "EleutherAI/pythia-70m-deduped": { + BertLayer: ("mlp", ) + }, + "bigscience/bloom-560m": { + BertLayer: ("mlp", ) + }, + "facebook/opt-125m": { + BertLayer: ("out_proj", ) + }, + "facebook/opt-350m": { + BertLayer: ("out_proj", ) + }, +} +DEFAULT_INJECTION_POLICY = {BertLayer: ("output.dense", )} _test_models = set(_bert_models + _roberta_models + _gpt_models + _opt_models) _test_tasks = [ "fill-mask", "question-answering", "text-classification", "token-classification", "text-generation", @@ -86,7 +117,8 @@ def _hf_model_list() -> List[ModelInfo]: cache_dir = os.getenv("HF_HOME", "~/.cache/huggingface") cache_file_path = os.path.join(cache_dir, "DS_model_cache.pkl") - cache_expiration_seconds = 60 * 60 * 24 # 1 day + num_days = os.getenv("HF_CACHE_EXPIRY_DAYS", 1) + cache_expiration_seconds = num_days * 60 * 60 * 24 # Load or initialize the cache model_data = {"cache_time": 0, "model_list": []} @@ -97,7 +129,8 @@ def _hf_model_list() -> List[ModelInfo]: current_time = time.time() # Update the cache if it has expired - if (model_data["cache_time"] + cache_expiration_seconds) < current_time: + if ((model_data["cache_time"] + cache_expiration_seconds) < current_time) or os.getenv("FORCE_UPDATE_HF_CACHE", + default=False): api = HfApi() model_data["model_list"] = [ ModelInfo(modelId=m.modelId, pipeline_tag=m.pipeline_tag, tags=m.tags) for m in api.list_models() @@ -125,6 +158,7 @@ def _hf_model_list() -> List[ModelInfo]: pytest.mt_names = [f"{m}-{t}" for m, t in pytest.model_w_tasks] +#Hugging Face model: WA. Hugging Face models were updated, causing the _test_models list to not be found in _hf_model_names. Changed the fixture from True to False. 
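The `_hf_model_list()` changes above replace the hard-coded one-day cache expiry with two environment variables, `HF_CACHE_EXPIRY_DAYS` and `FORCE_UPDATE_HF_CACHE`. A minimal sketch of the intended logic follows; note that `os.getenv()` returns strings, so the sketch casts the day count explicitly, and the truthiness handling of `FORCE_UPDATE_HF_CACHE` plus the helper name `hf_cache_expired` are illustrative assumptions, not code from this diff.

```python
import os
import time


def hf_cache_expired(cache_time: float) -> bool:
    """Sketch: should the cached Hugging Face model list be refreshed?"""
    # os.getenv() returns a string, so cast before doing arithmetic with it.
    num_days = int(os.getenv("HF_CACHE_EXPIRY_DAYS", "1"))
    cache_expiration_seconds = num_days * 60 * 60 * 24
    # Assumed convention: any of "1"/"true"/"yes" forces a refresh.
    force_update = os.getenv("FORCE_UPDATE_HF_CACHE", "").lower() in ("1", "true", "yes")
    return force_update or (cache_time + cache_expiration_seconds) < time.time()
```

With this behavior, setting `HF_CACHE_EXPIRY_DAYS=7` keeps the cached model list for a week, while exporting `FORCE_UPDATE_HF_CACHE` refreshes it on the next run regardless of age.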
@pytest.fixture(scope="module", autouse=True) def verify_models(): # Verify all test models are registered in HF @@ -162,6 +196,11 @@ def enable_triton(request): return request.param +@pytest.fixture(params=[1, 2], ids=["ws1", "ws2"]) +def world_size(request): + return request.param + + """ Fixtures for running query """ @@ -275,11 +314,19 @@ def verify_injection(module): verify_injection(model) +# Used to Get Device name +def getDeviceId(local_rank): + device = local_rank + if get_accelerator().device_name() != 'cuda': + device = torch.device(f"{get_accelerator().device_name()}") + return device + + # Verify that test is valid def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): model, task = model_w_task msg = "" - if enable_cuda_graph and (torch_info["cuda_version"] == "0.0"): + if enable_cuda_graph and (torch_info["cuda_version"] == "0.0") and get_accelerator().device_name() != 'hpu': msg = "CUDA not detected, cannot use CUDA Graph" elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"): msg = "CUDA Graph is only available in torch versions >= 1.10" @@ -296,6 +343,8 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): msg = f"Bloom models only support half precision, cannot use dtype {dtype}" elif (model not in _bert_models + _roberta_models) and enable_cuda_graph: msg = "Non bert/roberta models do no support CUDA Graph" + elif not get_accelerator().is_triton_supported() and enable_triton: + msg = f"Triton is not supported for {get_accelerator().device_name()}." elif enable_triton and not (dtype in [torch.half]): msg = "Triton is for fp16" elif enable_triton and not deepspeed.HAS_TRITON: @@ -311,7 +360,9 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): return msg -@pytest.mark.inference +@pytest.mark.parametrize('compile_mode', [True, False]) +@pytest.mark.parametrize("replace_with_kernel_inject", [True, False]) +@pytest.mark.nightly class TestModelTask(DistributedTest): world_size = 1 @@ -324,6 +375,8 @@ def test( query, inf_kwargs, assert_fn, + replace_with_kernel_inject, + compile_mode, perf_meas=True, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton) @@ -366,11 +419,20 @@ def test( 'use_triton': enable_triton, 'triton_autotune': False, } + if not replace_with_kernel_inject: + if get_accelerator().device_name() != 'hpu': + pytest.skip("Kernel Inject False validation for HPU tests.", ) + injection_policy = ModelsInjectionPolicyMap.get(model, DEFAULT_INJECTION_POLICY) + args['injection_policy'] = injection_policy + args['replace_with_kernel_inject'] = False if pipe.tokenizer.model_max_length < deepspeed.ops.transformer.inference.config.DeepSpeedInferenceConfig( ).max_out_tokens: args.update({'max_out_tokens': pipe.tokenizer.model_max_length}) pipe.model = deepspeed.init_inference(pipe.model, **args) - check_injection(pipe.model) + if compile_mode: + pipe.model.compile() + if replace_with_kernel_inject: + check_injection(pipe.model) # Warm-up queries for perf measurement #for i in range(10): # _ = pipe(query, **inf_kwargs) @@ -397,6 +459,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize("model_w_task", [("EleutherAI/gpt-neo-1.3B", "text-generation"), ("EleutherAI/gpt-neox-20b", "text-generation"), @@ -413,6 +476,7 @@ def test( query, inf_kwargs, assert_fn, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, 
dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -433,6 +497,8 @@ def test( mp_size=self.world_size, dtype=dtype, replace_with_kernel_inject=True) + if compile_mode: + pipe.model.compile() check_injection(pipe.model) # Switch device to GPU so that input tensors are not on CPU pipe.device = torch.device(get_accelerator().device_name(local_rank)) @@ -443,6 +509,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.inference @pytest.mark.parametrize("model_w_task", [("openai-community/gpt2", "text-generation")], ids=["gpt2"]) class TestLowCpuMemUsage(DistributedTest): @@ -454,6 +521,7 @@ def test( query, inf_kwargs, assert_fn, + compile_mode, ): model, task = model_w_task dtype = torch.float16 @@ -461,20 +529,22 @@ def test( pytest.skip(f"Acceleraor {get_accelerator().device_name()} does not support {dtype}.") local_rank = int(os.getenv("LOCAL_RANK", "0")) - - pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=local_rank, framework="pt") + device = getDeviceId(local_rank) + pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=device, framework="pt") bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=self.world_size, dtype=dtype, replace_method="auto", replace_with_kernel_inject=True) - + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize( "model_w_task, injection_policy", @@ -490,7 +560,6 @@ def test( ) @pytest.mark.parametrize("dtype", [torch.float], ids=["fp32"]) class TestInjectionPolicy(DistributedTest): - world_size = [1, 2] def test( self, @@ -500,6 +569,8 @@ def test( inf_kwargs, assert_fn, dtype, + world_size, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -507,7 +578,6 @@ def test( model, task = model_w_task local_rank = int(os.getenv("LOCAL_RANK", "0")) - world_size = int(os.getenv("WORLD_SIZE", "2")) pipe = pipeline(task, model=model, @@ -519,6 +589,8 @@ def test( mp_size=world_size, dtype=dtype, injection_policy=injection_policy) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -526,6 +598,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize( "model_w_task", @@ -543,6 +616,7 @@ def test( inf_kwargs, assert_fn, dtype, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -566,6 +640,8 @@ def test( bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -580,6 +656,7 @@ def test_odd_world_size( inf_kwargs, assert_fn, dtype, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -598,6 +675,8 @@ def test_odd_world_size( bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype) + if compile_mode: + 
pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -605,6 +684,7 @@ def test_odd_world_size( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.nightly @pytest.mark.parametrize( "model_family, model_name", @@ -619,7 +699,7 @@ class TestLMCorrectness(DistributedTest): world_size = 1 exec_timeout = 1200 # Give these tests longer to complete - def test(self, model_family, model_name, task): + def test(self, model_family, model_name, task, compile_mode): # imports here to avoid import errors when pytest collects tests import lm_eval import lm_eval.models @@ -650,7 +730,7 @@ def no_pool_bootstrap_stderr(f, xs, iters): dtype = torch.half lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}", {"device": "cpu"}) - setattr(lm, model_family, getattr(lm, model_family).half().to(device)) + setattr(lm, model_family, getattr(lm, model_family).to(dtype=dtype).to(device)) lm._device = device else: if get_accelerator().device_name() == 'hpu': @@ -677,6 +757,8 @@ def no_pool_bootstrap_stderr(f, xs, iters): replace_with_kernel_inject=True, enable_cuda_graph=False, ) + if compile_mode: + ds_model.compile() check_injection(ds_model) setattr(lm, model_family, ds_model) get_accelerator().synchronize() diff --git a/tests/unit/inference/test_model_profiling.py b/tests/unit/inference/test_model_profiling.py index 23e49f89025b..319055d0ea55 100644 --- a/tests/unit/inference/test_model_profiling.py +++ b/tests/unit/inference/test_model_profiling.py @@ -16,6 +16,9 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) + @pytest.mark.inference @pytest.mark.parametrize("use_cuda_events", [True, False]) diff --git a/tests/unit/inference/v2/inference_test_utils.py b/tests/unit/inference/v2/inference_test_utils.py index d63c51267e51..9405b6fde724 100644 --- a/tests/unit/inference/v2/inference_test_utils.py +++ b/tests/unit/inference/v2/inference_test_utils.py @@ -44,3 +44,10 @@ def allclose(x, y, tolerances: Tuple[int, int] = None): else: rtol, atol = tolerances return torch.allclose(x, y, rtol=rtol, atol=atol) + + +def skip_on_inference_v2(): + if get_accelerator().device_name() == 'hpu': + return True + else: + return False diff --git a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py index 376188b92565..49dbdc715556 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from deepspeed.inference.v2.kernels.core_ops import CUDABiasActivation -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_bias_act_implementation(input: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py 
b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py index 864db6204a16..9d8d2c177607 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py @@ -10,7 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import BlasLibLinear -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # Note: only testing with FP16 and BF16 because we use TF32 on Ampere and we don't have a good # set of tolerances. Since this is just on top of BLAS though, the test is more about diff --git a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py index 8cb95a6cdcba..4232cb7a6324 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAGatedActivation from deepspeed.inference.v2.inference_utils import ActivationType -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_geglu_implementation(input: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py index 0b489894bb9b..0549316081ee 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPostLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py index ffb748e57af2..4da5173d5f53 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPreLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py index 63b16da171c9..16357d0f2967 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py +++ 
b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_rms_norm(vals: torch.Tensor, gamma: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor: diff --git a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py index ed76dabe1f4c..c396cc1268d0 100644 --- a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py +++ b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from deepspeed.inference.v2.kernels.cutlass_ops import MoEGEMM -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') SINGLE_EXPERT_CASES = [(13, 2048, 2048), (256, 1024, 4096), (278, 5120, 2048), (893, 5120, 2560)] diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py index a33c938a0608..ad88ff2c5d69 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py @@ -6,8 +6,13 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import AtomBuilder from .ragged_testing_utils import build_complex_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') Q_BLOCK_SIZE = 128 KV_BLOCK_SIZE = 128 diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py index ce5a178c9548..bb0192bbbde6 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py @@ -23,7 +23,10 @@ from deepspeed.ops.op_builder import RaggedUtilsBuilder from .ragged_testing_utils import build_batch_and_manager -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py index 5f1ef930952c..03562e48390c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py @@ -9,6 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import 
LinearBlockedKVCopy from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2_ops diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py index 156be9929d92..06d67777e65d 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.kernels.ragged_ops import BlockedRotaryEmbeddings, BlockedTrainedRotaryEmbeddings from deepspeed.inference.v2.ragged import RaggedBatchWrapper, DSSequenceDescriptor from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ NOTE(cmikeh2): It is very possible to see unit test failures (even on FP16) depending on when certain values are casted up to or down from float32. If we are seeing accuracy issues, we should diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py index 1feefa9ee588..e00aa85d194c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedLogitsGather -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_simple_batch +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(hidden_states: torch.Tensor, seq_lens: List[int]) -> torch.Tensor: output = torch.empty((len(seq_lens), hidden_states.shape[1]), diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py index 3907fc3e3a4b..6538a81ec00a 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py @@ -14,6 +14,10 @@ RaggedTopKGating, ) from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` and ``MoEScatter`` to produce correct inputs. 
If either of these kernels is broken diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py index aae459f06a6f..9edc014eae33 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py @@ -10,6 +10,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import MoEScatter, RaggedTopKGating from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` to produce correct inputs. If ``RaggedTopKGating`` is broken, these tests will fail, so double check diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py index f179f62a9b12..32d7d312a4cf 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedEmbeddingKernel -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_batch_and_manager +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(token_ids: torch.Tensor, embedding_table: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py index 5fa0c8a079f0..178512351c0f 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import RaggedTopKGating from .ragged_testing_utils import build_simple_batch -from ...inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _top_k_gating_testing_helper(n_tokens: int, n_experts: int, n_top_k: int, seed: int = 0xC0FFEE) -> None: diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py index 52ff0e134dfc..901d9d9b43e7 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py @@ -15,6 +15,10 @@ ) from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class TransformerLayerContainer(LayerContainer): diff --git 
a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py index 07ad87e6168d..c457227d5499 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class ParentLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py index 52313cb6f202..0701b8dcc4d8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiDependencyContainer(ParameterBase): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py index b319bf6de4ad..e7ba08b3c2a8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import validate_device, SimpleParam, ListParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiParameterLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py index 06ff9047d648..5f39d3251ea9 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py @@ -6,6 +6,7 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import 
InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList @@ -13,6 +14,10 @@ from deepspeed.inference.v2.model_implementations.common_parameters import * from .utils import validate_device +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class SimpleMoELayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py index 850c4c24fde6..fb7901dbf938 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # None of the logic should be dependent on head size. HEAD_SIZE = 64 diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py index aac7e5391d8f..553d604d30ee 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def round_up_to_256(x: int) -> int: diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py index 9a1cb9c09c64..86575d2176ad 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py @@ -10,6 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def fill_with_head_ids(head_size: int, n_heads_q: int, n_heads_kv: Optional[int] = None) -> torch.Tensor: diff --git a/tests/unit/inference/v2/modules/test_blas_linear_module.py b/tests/unit/inference/v2/modules/test_blas_linear_module.py index f4d0b1991238..b50819875699 100644 --- a/tests/unit/inference/v2/modules/test_blas_linear_module.py +++ b/tests/unit/inference/v2/modules/test_blas_linear_module.py @@ -13,7 +13,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSLinearConfig from deepspeed.inference.v2.modules.interfaces import DSLinearRegistry -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not 
supported by {get_accelerator().device_name()}.') def reference_implementation(hidden_states: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/modules/test_blocked_attn.py b/tests/unit/inference/v2/modules/test_blocked_attn.py index 6556aa460a44..b55909a69caf 100644 --- a/tests/unit/inference/v2/modules/test_blocked_attn.py +++ b/tests/unit/inference/v2/modules/test_blocked_attn.py @@ -16,7 +16,10 @@ from deepspeed.inference.v2.modules.interfaces import DSSelfAttentionRegistry, DSSelfAttentionBase from ..kernels.ragged_ops.ragged_testing_utils import build_batch_and_manager -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py index 386f3b3ef0b3..dc971f83f0c2 100644 --- a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py +++ b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_custom_module.py b/tests/unit/inference/v2/modules/test_custom_module.py index eb54b7a913f2..b813b715ec1e 100644 --- a/tests/unit/inference/v2/modules/test_custom_module.py +++ b/tests/unit/inference/v2/modules/test_custom_module.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.implementations import cuda_post_ln -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_cutlass_moe.py b/tests/unit/inference/v2/modules/test_cutlass_moe.py index b14ba127c6be..27ba70b88728 100644 --- a/tests/unit/inference/v2/modules/test_cutlass_moe.py +++ b/tests/unit/inference/v2/modules/test_cutlass_moe.py @@ -15,7 +15,10 @@ from deepspeed.inference.v2.modules.interfaces import DSMoERegistry from ..kernels.ragged_ops.ragged_testing_utils import build_simple_batch -from ...v2.inference_test_utils import allclose, get_dtypes +from ...v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _gating_reference(logits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: diff --git 
a/tests/unit/inference/v2/modules/test_post_ln_module.py b/tests/unit/inference/v2/modules/test_post_ln_module.py index f9dcfd272170..538eb32c3c85 100644 --- a/tests/unit/inference/v2/modules/test_post_ln_module.py +++ b/tests/unit/inference/v2/modules/test_post_ln_module.py @@ -10,7 +10,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_pre_rms_module.py b/tests/unit/inference/v2/modules/test_pre_rms_module.py index bbd108a35a5a..58bf7761bafa 100644 --- a/tests/unit/inference/v2/modules/test_pre_rms_module.py +++ b/tests/unit/inference/v2/modules/test_pre_rms_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/ragged/test_blocked_allocator.py b/tests/unit/inference/v2/ragged/test_blocked_allocator.py index 4596e81c5652..6dddeff6ee9f 100644 --- a/tests/unit/inference/v2/ragged/test_blocked_allocator.py +++ b/tests/unit/inference/v2/ragged/test_blocked_allocator.py @@ -9,7 +9,12 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged.blocked_allocator import BlockedAllocator +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_manager_configs.py b/tests/unit/inference/v2/ragged/test_manager_configs.py index a5f270cced8c..c2dec7673308 100644 --- a/tests/unit/inference/v2/ragged/test_manager_configs.py +++ b/tests/unit/inference/v2/ragged/test_manager_configs.py @@ -7,7 +7,12 @@ from deepspeed.pydantic_v1 import ValidationError +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged import DSStateManagerConfig +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py index 3cb74f4c49d2..669368f9ef9d 100644 --- a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py +++ b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py @@ -14,6 +14,10 @@ RaggedBatchWrapper, DSStateManagerConfig, ) +from ...v2.inference_test_utils import skip_on_inference_v2 
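The same two-line change recurs across the inference-v2 kernel, module, sharding, and ragged test files above: import `skip_on_inference_v2` and set a module-level `pytestmark`. A self-contained sketch of that pattern is shown below, inlining the helper added to `tests/unit/inference/v2/inference_test_utils.py` in place of the relative import the tests actually use.

```python
import pytest

from deepspeed.accelerator import get_accelerator


def skip_on_inference_v2() -> bool:
    # Mirrors the helper added in inference_test_utils.py: the v2 inference
    # kernels are not supported on HPU, so those test modules are skipped there.
    return get_accelerator().device_name() == 'hpu'


# Placed at module scope, this marks every test in the file as skipped on HPU.
pytestmark = pytest.mark.skipif(skip_on_inference_v2(),
                                reason=f'Inference V2 not supported by {get_accelerator().device_name()}.')
```

Centralizing the check in one helper keeps the per-file change to a single import plus the `pytestmark` assignment, so future accelerators can be excluded (or re-enabled) in one place.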
+ +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/moe/test_moe.py b/tests/unit/moe/test_moe.py index fdff9430a4e6..bf1ace2ba2db 100644 --- a/tests/unit/moe/test_moe.py +++ b/tests/unit/moe/test_moe.py @@ -77,6 +77,8 @@ def test(self, ep_size, zero_stage, use_residual): # E+D -- ep_size = 2 # E only -- ep_size = 4 model = SimpleMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual) + #TODO SW-179530: remove workaround when issue with lazy mode is resolved (see SW-179530). + model.to(get_accelerator().device_name()) param_group = {'params': [p for p in model.parameters()], 'name': 'random-unique-name'} params = split_params_into_different_moe_groups_for_optimizer(param_group) optimizer = torch.optim.AdamW(params=params) diff --git a/tests/unit/ops/adam/test_adamw.py b/tests/unit/ops/adam/test_adamw.py index 3b1b088766a5..39f67da55d93 100644 --- a/tests/unit/ops/adam/test_adamw.py +++ b/tests/unit/ops/adam/test_adamw.py @@ -12,6 +12,7 @@ from unit.common import DistributedTest from unit.simple_model import SimpleModel from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedAdamBuilder if torch.half not in get_accelerator().supported_dtypes(): pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) @@ -67,6 +68,9 @@ def test(self, "cpu_offload": zero_offload } } + if (resulting_optimizer[0] == FusedAdam) and (not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]): + pytest.skip("FusedAdam is not compatible") + model = SimpleModel(10) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, diff --git a/tests/unit/ops/aio/test_aio.py b/tests/unit/ops/aio/test_aio.py old mode 100644 new mode 100755 diff --git a/tests/unit/ops/transformer/inference/inference_test_utils.py b/tests/unit/ops/transformer/inference/inference_test_utils.py index 9c7b428c0e68..9cfcae809f09 100644 --- a/tests/unit/ops/transformer/inference/inference_test_utils.py +++ b/tests/unit/ops/transformer/inference/inference_test_utils.py @@ -26,12 +26,7 @@ def get_tolerances(): def get_dtypes(): global DTYPES if DTYPES is None: - DTYPES = [torch.float16, torch.float32] - try: - if get_accelerator().is_bf16_supported(): - DTYPES.append(torch.bfloat16) - except (AssertionError, AttributeError): - pass + DTYPES = get_accelerator().supported_dtypes() return DTYPES diff --git a/tests/unit/ops/transformer/inference/test_bias_add.py b/tests/unit/ops/transformer/inference/test_bias_add.py index 843c9b889c2b..f25bbc1be692 100644 --- a/tests/unit/ops/transformer/inference/test_bias_add.py +++ b/tests/unit/ops/transformer/inference/test_bias_add.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_add import BiasAddOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -22,15 +23,8 @@ def run_bias_add_reference(activations, bias): def run_bias_add_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() 
- if activations.dtype == torch.float16: - return inference_module.bias_add_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_add_bf16(activations, bias) - else: - return inference_module.bias_add_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasAddOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_geglu.py b/tests/unit/ops/transformer/inference/test_bias_geglu.py index d5ab13964974..05de4fbb4cf8 100644 --- a/tests/unit/ops/transformer/inference/test_bias_geglu.py +++ b/tests/unit/ops/transformer/inference/test_bias_geglu.py @@ -8,13 +8,13 @@ import deepspeed from deepspeed.ops.op_builder import InferenceBuilder from deepspeed.accelerator import get_accelerator +from deepspeed.ops.transformer.inference.op_binding.gated_activation import GatedActivationOp from deepspeed.utils.types import ActivationFuncType from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -27,10 +27,7 @@ def run_bias_geglu_reference(activations, bias): def run_bias_geglu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_GELU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_GELU) @pytest.mark.inference_ops @@ -56,17 +53,14 @@ def run_gated_silu_reference(activations, bias): def run_gated_silu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_SILU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_SILU) @pytest.mark.inference_ops @pytest.mark.parametrize("batch", [1, 2]) @pytest.mark.parametrize("sequence", [1, 128, 255]) @pytest.mark.parametrize("channels", [512, 1232, 4096]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_gated_silu(batch, sequence, channels, dtype): activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name()) bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name()) diff --git a/tests/unit/ops/transformer/inference/test_bias_gelu.py b/tests/unit/ops/transformer/inference/test_bias_gelu.py index fd82da51380c..b69030e87ace 100644 --- a/tests/unit/ops/transformer/inference/test_bias_gelu.py +++ b/tests/unit/ops/transformer/inference/test_bias_gelu.py @@ -8,13 +8,14 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp from .inference_test_utils import allclose, get_dtypes from packaging import version as pkg_version if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -25,15 +26,8 @@ def run_bias_gelu_reference(activations, bias): def 
run_bias_gelu_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_gelu_bf16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_relu.py b/tests/unit/ops/transformer/inference/test_bias_relu.py index 881af78e92cf..57134665b241 100644 --- a/tests/unit/ops/transformer/inference/test_bias_relu.py +++ b/tests/unit/ops/transformer/inference/test_bias_relu.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_relu import BiasReluOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -23,15 +24,8 @@ def run_bias_relu_reference(activations, bias): def run_bias_relu_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_relu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_relu_bf16(activations, bias) - else: - return inference_module.bias_relu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasReluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_gelu.py b/tests/unit/ops/transformer/inference/test_gelu.py index 675860b00bdb..beb74d09ab30 100644 --- a/tests/unit/ops/transformer/inference/test_gelu.py +++ b/tests/unit/ops/transformer/inference/test_gelu.py @@ -7,11 +7,12 @@ import torch import deepspeed from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -45,13 +46,8 @@ def run_gelu_ds(activations, use_triton_ops=False): device = deepspeed.accelerator.get_accelerator().device_name() channels = activations.shape[-1] bias = torch.zeros((channels), dtype=activations.dtype, device=device) - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_layer_norm.py b/tests/unit/ops/transformer/inference/test_layer_norm.py index 9eac612aa29c..2912807e9f43 100644 --- 
a/tests/unit/ops/transformer/inference/test_layer_norm.py +++ b/tests/unit/ops/transformer/inference/test_layer_norm.py @@ -8,6 +8,7 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from .inference_test_utils import allclose, get_dtypes, assert_almost_equal try: import triton # noqa: F401 # type: ignore @@ -21,8 +22,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): vals_f = vals.to(torch.float32) @@ -32,10 +31,7 @@ def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): def ds_implementation(vals, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.layer_norm(vals, gamma, beta, epsilon) + return LayerNormOp()(vals, gamma, beta, epsilon) def ds_triton_implementation(vals, gamma, beta, epsilon): @@ -83,10 +79,7 @@ def residual_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, def residual_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module._layer_norm_residual(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual(vals, bias, res, gamma, beta, epsilon) def residual_ds_triton_implementation(vals, bias, res, gamma, beta, epsilon): @@ -137,10 +130,7 @@ def residual_store_ref_implementation(vals, bias, res, gamma, beta, epsilon, cha def residual_store_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py index e1c8127a83ac..dcf9f16baaf1 100644 --- a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py +++ b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py @@ -8,24 +8,20 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.moe_res_matmul import MoEResMatmulOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def run_moe_res_matmul_reference(residual, coef1, coef2, output): return residual * coef1 + output * coef2 def run_moe_res_matmul_ds(residual, coef, output): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() coef_t = coef.transpose(-1, -2).contiguous() - return inference_module.moe_res_matmul(residual, coef_t, output) + return MoEResMatmulOp()(residual, coef_t, output) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_residual_add.py 
b/tests/unit/ops/transformer/inference/test_residual_add.py index 91830e25fc81..807da4904341 100644 --- a/tests/unit/ops/transformer/inference/test_residual_add.py +++ b/tests/unit/ops/transformer/inference/test_residual_add.py @@ -8,6 +8,8 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding import ResidualAddOp from .inference_test_utils import get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: @@ -36,11 +38,6 @@ def allclose(x, y): return torch.allclose(x, y, rtol=rtol, atol=atol) -@pytest.fixture(scope="module") -def inference_module(): - return InferenceBuilder().load() - - def res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size=1, pre_attn_norm=True): if pre_attn_norm: hidden_state += (residual + final_bias + attn_output + attn_bias) / mp_size @@ -75,8 +72,8 @@ def run_residual_add_reference(hidden_state, residual, attn_output, attn_bias, f @pytest.mark.parametrize("mp_size", [1, 2]) @pytest.mark.parametrize("pre_attn_norm", [True, False]) @pytest.mark.parametrize("use_triton_ops", [True, False]) -def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, - pre_attn_norm, use_triton_ops): +def test_residual_add(batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, pre_attn_norm, + use_triton_ops): if not deepspeed.HAS_TRITON and use_triton_ops: pytest.skip("triton has to be installed for the test") ds_out = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) @@ -96,19 +93,9 @@ def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_ if use_triton_ops: from deepspeed.ops.transformer.inference.triton import residual_add_bias ds_out = residual_add_bias(*res_add_args) - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - elif dtype == torch.bfloat16: - ds_out = inference_module.residual_add_bias_bf16(*res_add_args) else: - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - else: - raise ValueError(f"Unsupported dtype: {dtype}") + config = DeepSpeedInferenceConfig(dtype=dtype) + ds_out = ResidualAddOp(config).residual_add_func(*res_add_args) if not allclose(ds_out, ref_out): print((ds_out - ref_out).abs().max()) diff --git a/tests/unit/ops/transformer/inference/test_rms_norm.py b/tests/unit/ops/transformer/inference/test_rms_norm.py index 508a40e12e8d..ed500ec16f9f 100644 --- a/tests/unit/ops/transformer/inference/test_rms_norm.py +++ b/tests/unit/ops/transformer/inference/test_rms_norm.py @@ -8,13 +8,13 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder # type: ignore +from deepspeed.ops.transformer.inference.op_binding.pre_rms_norm import PreRMSNormOp +from deepspeed.ops.transformer.inference.op_binding.rms_norm import RMSNormOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def 
ref_implementation(vals, gamma, epsilon): variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) @@ -27,10 +27,7 @@ def ref_implementation(vals, gamma, epsilon): def ds_implementation(vals, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.rms_norm(vals, gamma, epsilon) + return RMSNormOp()(vals, gamma, epsilon) @pytest.mark.inference_ops @@ -51,10 +48,7 @@ def test_rms_norm(batch, seq_len, channels, dtype): def pre_ds_implementation(vals, residual, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.pre_rms_norm(vals, residual, gamma, epsilon) + return PreRMSNormOp()(vals, residual, gamma, epsilon) def pre_ref_implementation(vals, residual, gamma, epsilon): @@ -74,7 +68,7 @@ def pre_ref_implementation(vals, residual, gamma, epsilon): @pytest.mark.parametrize("batch", [1, 32]) @pytest.mark.parametrize("seq_len", [1, 128]) @pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_pre_norm(batch, seq_len, channels, dtype): device = get_accelerator().current_device_name() vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device) diff --git a/tests/unit/ops/transformer/inference/test_softmax.py b/tests/unit/ops/transformer/inference/test_softmax.py index 7d0d6e14b651..c44c6e171aaa 100644 --- a/tests/unit/ops/transformer/inference/test_softmax.py +++ b/tests/unit/ops/transformer/inference/test_softmax.py @@ -11,7 +11,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None diff --git a/tests/unit/runtime/half_precision/test_bf16.py b/tests/unit/runtime/half_precision/test_bf16.py index 0af14abc3be5..99339e35ba4f 100644 --- a/tests/unit/runtime/half_precision/test_bf16.py +++ b/tests/unit/runtime/half_precision/test_bf16.py @@ -10,7 +10,7 @@ from unit.common import DistributedTest from deepspeed.ops.op_builder import CPUAdamBuilder from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader -from unit.util import bf16_required_version_check +from unit.util import bf16_required_version_check, hpu_lazy_enabled from deepspeed import comm as dist from deepspeed.accelerator import get_accelerator @@ -197,6 +197,9 @@ def test(self, optimizer_constructor, zero_stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) @@ -275,6 +278,9 @@ def test(self, stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, @@ -300,9 +306,10 @@ def test(self, comp_type, comm_type): " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" ) - if comp_type == torch.float16 or comm_type == torch.float16: - if 
not get_accelerator().is_fp16_supported(): - pytest.skip("fp16 is not supported") + if comm_type and (comp_type not in get_accelerator().supported_dtypes() + or comm_type not in get_accelerator().supported_dtypes()): + pytest.skip( + f"comp_type:{comp_type}, comm_type:{comm_type} not supported by {get_accelerator().device_name()}.") type_str = {torch.float16: "fp16", torch.bfloat16: "bf16"} @@ -326,6 +333,12 @@ def test(self, comp_type, comm_type): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + # TODO: remove this when the following is resolved: + # https://jira.habana-labs.com/browse/SW-137450 + config_dict["fp16"]["initial_scale_power"] = 30 + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, diff --git a/tests/unit/runtime/half_precision/test_fp16.py b/tests/unit/runtime/half_precision/test_fp16.py index dba15a969459..7690a8e3a2c7 100644 --- a/tests/unit/runtime/half_precision/test_fp16.py +++ b/tests/unit/runtime/half_precision/test_fp16.py @@ -12,7 +12,8 @@ from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader, SimpleMoEModel, sequence_dataloader from deepspeed.utils.torch import required_torch_version from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import CPUAdamBuilder, FusedLambBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, FusedLambBuilder, FusedAdamBuilder +from unit.util import hpu_lazy_enabled from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer try: @@ -26,6 +27,7 @@ pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestLambFP32GradClip(DistributedTest): world_size = 2 @@ -60,6 +62,7 @@ def test(self): model.step() +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestLambFP16(DistributedTest): world_size = 2 @@ -207,6 +210,8 @@ def mock_unscale_and_clip_grads(total_norm, apply_scale=True): engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="fused adam is not compatible") def test_fused_gradnorm(self, monkeypatch): if not get_accelerator().is_fp16_supported(): pytest.skip("fp16 is not supported") @@ -240,6 +245,7 @@ def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True) engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") @pytest.mark.parametrize("fused_lamb_legacy", [(False), (True)]) @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="FusedLambBuilder has not been implemented on this system.") @@ -613,6 +619,9 @@ def test(self, zero_stage, optimizer_constructor): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) @@ -748,6 +757,9 @@ def test(self, stage): hidden_dim = 10 model = SimpleModel(hidden_dim) + if 
hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) diff --git a/tests/unit/runtime/test_autocast.py b/tests/unit/runtime/test_autocast.py index 9176770afda7..9692f105d69d 100644 --- a/tests/unit/runtime/test_autocast.py +++ b/tests/unit/runtime/test_autocast.py @@ -25,6 +25,7 @@ def test_missing_amp_autocast(self, half_op): output = ds_linear(input) assert output.dtype == ds_linear.weight.dtype + @pytest.mark.skipif(get_accelerator().amp() is None, reason='amp is not installed') def test_disable_autocast_linear(self, half_op): amp = get_accelerator().amp() diff --git a/tests/unit/runtime/test_ds_config_dict.py b/tests/unit/runtime/test_ds_config_dict.py index c11c63d04867..764563f4da31 100644 --- a/tests/unit/runtime/test_ds_config_dict.py +++ b/tests/unit/runtime/test_ds_config_dict.py @@ -9,6 +9,7 @@ import json import hjson import argparse +import torch from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.accelerator import get_accelerator @@ -256,7 +257,11 @@ def test(self, base_config): model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=torch.half) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) with pytest.raises(AssertionError): diff --git a/tests/unit/runtime/test_ds_initialize.py b/tests/unit/runtime/test_ds_initialize.py index 9ff99f169f7a..d5a070dc691d 100644 --- a/tests/unit/runtime/test_ds_initialize.py +++ b/tests/unit/runtime/test_ds_initialize.py @@ -11,7 +11,7 @@ from unit.simple_model import SimpleModel, random_dataloader from unit.common import DistributedTest -from unit.util import bf16_required_version_check, required_amp_check +from unit.util import bf16_required_version_check, required_amp_check, hpu_lazy_enabled import deepspeed from deepspeed.ops.adam import FusedAdam @@ -230,6 +230,9 @@ def test(self, optimizer_extension, model_dtype, grad_accum_dtype): hidden_dim = 10 model = SimpleModel(hidden_dim) + # TODO: SW-145674 remove this WA when SW-145671 is resolved. 
+ if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) model_parameters = list(model.parameters()) if key in is_supported: diff --git a/tests/unit/runtime/zero/test_zero.py b/tests/unit/runtime/zero/test_zero.py index 7262a1b2c998..34f243f25978 100644 --- a/tests/unit/runtime/zero/test_zero.py +++ b/tests/unit/runtime/zero/test_zero.py @@ -25,6 +25,7 @@ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint from deepspeed.runtime.zero.utils import ZeRORuntimeException from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled def run_unbalanced_gradients(model, data_loader): @@ -289,6 +290,8 @@ def forward(self, x, y): world_size = dist.get_world_size() n_layers = world_size * 2 model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) optim_groups = [ { @@ -987,8 +990,8 @@ class ModelWhereParentInitializesChildWeights(Module): def __init__(self) -> None: super().__init__() - - self.linear = Linear(12, 1) + dev = get_accelerator().device_name() + self.linear = Linear(12, 1, device=dev) self.apply(self.__init_weights) @@ -1522,6 +1525,9 @@ def test(self, force_ds_optim): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) diff --git a/tests/unit/skip_marker.py b/tests/unit/skip_marker.py new file mode 100644 index 000000000000..fbdff3589938 --- /dev/null +++ b/tests/unit/skip_marker.py @@ -0,0 +1,320 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_lazy_skip_tests = {} + +g1_lazy_skip_tests = { + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run" +} + +g2_lazy_skip_tests = { + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "Stuck, SW-190067.", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run" +} + +g3_lazy_skip_tests = { + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": "test hang patch:430071", 
+ "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "test hang patch:430071", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": "test hang patch:430071", +} +hpu_eager_skip_tests = {} + +g1_eager_skip_tests = { + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]": + "Flaky Segfault. Stuck", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-False]": + "Flaky Segfault. Stuck", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-False]": + "Struck observed", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-False]": + "Flaky struck observed", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True]": + "Skip struck and fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "Test Hang", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Test Hang", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-False]": 
+ "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]": + "Test Hang", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]": + "Test Hang", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]": + "Test Hang", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]": + "test Hang", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype0]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True]": + "Skip due to flaky hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "test Hang", 
+} + +g2_eager_skip_tests = { + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "Stuck, SW-190067.", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]": + "Test Hang", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + 
"unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]": + "Flaky Segfault. 
Stuck", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]": + "Test Hang", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]": + "Test Hang", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-fp16-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]": + "Skip due to SW-193097", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "test Hang", +} +g3_eager_skip_tests = { + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]": + "Skip workload takes longer time to run", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-True]": + "Skip struck for longer duration", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip struck for longer duration", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "test hang patch:430071", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "test hang patch:430071", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "test hang patch:430071", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]": + "Skip due to SW-193097", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]": + "Skip due to SW-193097", + 
"unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]": + "Skip due to SW-193097", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]": + "GC failed so skip to check", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-noTriton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-Triton-True-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-noCG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-Triton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-CG-noTriton-False-False]": + "Skip bloom due to process struck and also fail", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]": + "Flaky Segfault. 
Stuck", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp32-CG-Triton-True-True]": + "Test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "test Hang", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-True]": + "test Hang", +} + +gpu_skip_tests = { + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "Disabled as it is causing test to stuck. SW-163517.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail not supported", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "skip: timeout triggered", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-False]": + "skip: timeout triggered", +} diff --git a/tests/unit/util.py b/tests/unit/util.py index feec326ede6c..8ae866fdd9aa 100644 --- a/tests/unit/util.py +++ b/tests/unit/util.py @@ -5,6 +5,8 @@ import pytest import torch +import os +import multiprocessing from deepspeed.accelerator import get_accelerator, is_current_accelerator_supported from deepspeed.git_version_info import torch_info @@ -67,3 +69,57 @@ def required_amp_check(): return False else: return True + + +def worker(proc_id, return_dict): + #TODO SW-114787: move to new api outside experimental + import habana_frameworks.torch.utils.experimental as htexp + deviceType = htexp._get_device_type() + if deviceType == htexp.synDeviceType.synDeviceGaudi: + return_dict['devicetype'] = "Gaudi" + elif deviceType == htexp.synDeviceType.synDeviceGaudi2: + return_dict['devicetype'] = "Gaudi2" + elif deviceType == htexp.synDeviceType.synDeviceGaudi3: + return_dict['devicetype'] = "Gaudi3" + else: + return_dict['devicetype'] = None + assert False, f'Unexpected hpu device Type: {deviceType}' + + +def get_hpu_dev_version(): + hpu_dev = None + if get_accelerator().device_name() != 'hpu': + return hpu_dev + if os.getenv("DEEPSPEED_UT_HL_DEVICE", default=None): + hpu_dev = os.getenv("DEEPSPEED_UT_HL_DEVICE") + if hpu_dev not in ["Gaudi", "Gaudi2", "Gaudi3"]: + manager = multiprocessing.Manager() + return_dict = manager.dict() + proc_id = 0 + multiprocessing.set_start_method("spawn", force=True) + p = multiprocessing.Process(target=worker, args=(proc_id, return_dict)) + p.start() + p.join() + try: + dev_type = return_dict['devicetype'] + except: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + p.terminate() + exit_code = p.exitcode + if exit_code: + assert False, 'HPU dev type process exit with: {}'.format(exit_code) + if dev_type in ["Gaudi", "Gaudi2", "Gaudi3"]: + hpu_dev = dev_type + os.environ['DEEPSPEED_UT_HL_DEVICE'] = dev_type + return dev_type + else: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + else: + return hpu_dev + + +def hpu_lazy_enabled(): + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + return thpu.is_lazy() + return False diff --git a/tests/unit/xfail_marker.py b/tests/unit/xfail_marker.py new file mode 100644 index 000000000000..c8f197ee1c05 --- /dev/null +++ b/tests/unit/xfail_marker.py @@ -0,0 +1,5797 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_lazy_xfail_tests = {} + +g1_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to FP16 not supported.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to FP16 not supported.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_model_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-False]": + "float16/half is not supported on Gaudi.", + 
"unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + 
"unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTP::test[falcon-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-codegen-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "Xfail, FP16 not supported.", + 
"unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "Xfail, FP16 not supported.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to FP16 not supported.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to FP16 not supported.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo-False]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail, due to FP16 not supported.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail, due to FP16 not supported.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "Xfail, due to FP16 not supported.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "Xfail, due to FP16 not supported.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail, due to Gaudi1 does not support FP16.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to Gaudi1 does not support 
FP16.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported 
on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True]": + "Xfail, FP16 not supported.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "Fp16 not supported by 
Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Not supported on Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Not supported on Gaudi1", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + " Comm Init Rank Error.", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + " Comm Init Rank Error.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "Fp16 not supported by Gaudi1", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "Xfail, due to FP16 not supported", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "Xfail, due to FP16 not supported.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "Xfail, due to FP16 not supported.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "Xfail, due to FP16 not supported.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Xfail, due to SW-179864.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]": + "Xfail, due to SW-179861.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Fp16 not supported by Gaudi1.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "Fp16 not 
supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "fp16 is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "fp16 is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + "fp16 is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "fp16 is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "fp16 is not supported on Gaudi.", + 
"unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + 
"unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "fp16 is not supported Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m]-False": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half is not 
supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is 
not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + 
"float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate 
English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-False]": + "Xfail due to FP16 not supported on gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail due to FP16 not supported on gaudi", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-False]": + "Xfail due to FP16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-False]": + "xfail due to FP16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[2]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[1]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[0]": + "Xfail due to fp16 not supported", +} + +g2_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + 
"unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA 
tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo-False]": + "Xfail, due to SW-.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail, due to SW-.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-j-False]": + "Xfail, due to SW-162660.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + 
"unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-162575.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "Xfail, due to SW-162660.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "Xfail, due to SW-145262.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "Xfail, due to SW-164577.", + 
"unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "Xfail, due to SW-164577.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "Xfail, due to SW-164593.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "Xfail, due to SW-164606.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "Xfail, due to SW-156782.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail, due to SW-163098.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": # noqa: F601 + "Xfail, due to SW-170181.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": # noqa: F601 + "Xfail, due to SW-168442.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": # noqa: F601 + "Xfail, due to SW-162575.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail, due to SW-162660.", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]" : "Xfail, due to SW-178730.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]" : "Xfail, due to 
SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]" : "Xfail, due to SW-179861.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]" : "Xfail, due to SW-179873.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]" : "Xfail, due to SW-180868.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]" : 
"Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]" : "Xfail, due to SW-175716.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-False-False]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]":"xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-True-False]":"Xfail due to SW-181935", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]":"Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]":"Xfail due to SW-182753", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test":"Xfail due to SW-", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]":"Xfail due to SW-182759", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor":"Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible":"Xfail due to SW-182766", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]":"xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-True-False]":"Xfail due to SW-181935", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": "xfail due to 
SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": "xfail due to SW-184834", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]":" xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]":" xfail due to SW-185015", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization":"Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization":"Xfail due to SW-182766", + +} + +g3_lazy_xfail_tests = { + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype2]": + "Xfail due to SW-187590", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail due to SW-168442", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "xfail due to SW-185015", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail, due to SW-168583", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail, due to SW-168583", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "skipping due to 
TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-182502", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "Xfail, due to SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "Xfail, due to SW-176903", + 
"unit/ops/transformer/inference/test_bias_geglu.py::test_bias_geglu[dtype1-512-1-1]": + "Xfail flaky", + "unit/ops/transformer/inference/test_bias_geglu.py::test_gated_silu[dtype0-512-1-1]": + "Xfail flaky", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "Xfail due to SW-181939", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConvergence::test[gpt2]": + "XFail for now", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/runtime/zero/test_zero_nesting_init.py::TestNestedParallelInit::test_nested_parallel_init": + "Xfail download issue", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "Xfail due to SW-182766", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "Xfail, due to SW-164551", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail due to RuntimeError: Incompatible input shapes, broadcast not possible. 
Tensor1 Size: 5 5 16 1 Tensor2 Size: 5 1 8During handling of the above exception, another exception occurred", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail due to SW-188513", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "xfail due to SW-194902", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "Xfail due to SW-182766", +} + +hpu_eager_xfail_tests = {} + +g1_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + 
"float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half 
is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not 
supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "float16/half is not 
supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + 
"unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + 
"float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo-False]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-False]": + "Xfail, due to SW-175376.", + 
"unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/inference/test_inference.py::TestInjectionPolicy::test[fp32-t5-False]": + "Xfail, due to SW-182668", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-False]": + "Xfail, due to SW-182669", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian]": + "Xfail, due to SW-182670", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-False-False]": + 
"Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-182671", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Xfail due to SW-182509", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "Xfail due to SW-182509", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to OP not implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to OP not implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-False-False]": + 
"Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to 
gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-False-False]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail due to FP16 not supported on gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail due to FP16 not supported on gaudi", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-False]": + "Xfail, due to SW-.", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-t5-False]": + "Xfail, due to SW-.", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype1]": + "Xfail due to 
fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", 
+ "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Fp16 not supported", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "FP16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True]": + 
"Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-False]": + "Xfail due to FP16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-False-False]": + "Xfail due to sw-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]": + "Xfail due to sw-193404", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]": + "Xfail due to sw-193404", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-t5-True]": + "xfail due to sw-187946", + "unit/moe/test_moe.py::TestSimpleMoE::test[2]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[1]": + "Xfail due to fp16 not supported", + "unit/moe/test_moe.py::TestSimpleMoE::test[0]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-195011", + "unit/runtime/test_multi_output_model.py::TestThreeOutputModel::test": + "xfail due to 198794", + "unit/runtime/test_multi_output_model.py::TestTwoOutputModel::test": + "xfail due to 198794", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "xfail due to SW-199012", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-False]": + "xfail due to SW-182671", +} + +g2_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "Xfail, due to SW-176903.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "Xfail, due to SW-176903.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "Xfail, due to SW-176905.", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail, due to SW-196522", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + 
"unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-False-False]": + "Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "Xfail due to SW-182766", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-181935", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton-True-False]": + "Xfail due to SW-181935", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-codegen-False]": + "Xfail, due to SW-178702", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-182502", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to op not been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to op not been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to op not been implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-False]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton-False-False]": + "xfail due to SW-184834", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + " xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + " xfail due to SW-185015", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail due to SW-182502", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inf.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due 
to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Xfail due to 189259", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True]": + "Xfail due to SW-189262", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "Xfail due to SW-187946", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "Xfail due to SW-187946", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]": + "Xfail due to sw-193404", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]": + "Xfail due to sw-193404", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail due 
to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail due to SW-182766", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail due to SW-199012", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail due to SW-199012", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "xfail due to SW-163097", +} +g3_eager_xfail_tests = { + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype1]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to SW-196568 This op had not been implemented on HPU backend", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "xfail due to SW-176905", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "xfail due to SW-176905", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "xfail due to SW-168596", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "xfail due to SW-168596", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "xfail due to SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "xfail due to SW-176903", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail due to SW-188274", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype1]": + "Xfail due to SW-187821", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype0]": + "Xfail due to 
SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail due to SW-168442", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "xfail due to SW-163097", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "xfail due to SW-176905", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail, due to SW-196522.", + 
"unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "xfail due to SW-185015", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Graphic compile failed", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-False]": + "Graph compile failed", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-196571 Assertion error", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Xfail due to SW-196522", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-roberta-True]": + "Xfail due to sw-193404", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5-True]": + "Xfail due to sw-193404", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail due to SW-182766", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail due to SW-199012", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail due to SW-199012", +} +gpu_xfail_tests = { + 
"unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Test requires higher memory.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Test requires higher memory.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2037]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[65]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[256]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[33]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_fully_composed": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[33-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_multi_sequence": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[63-1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-169-8]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-63]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-63]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[1024]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6144]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6784]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens2]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[50304-6144]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding_offset": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[32000-5120]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[1024-1024]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[433-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_negative_logits": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[1-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[433-2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_determinism": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[1-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape0-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape4-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape7-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape5-fp_dtype1]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape1-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape3-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape2-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape4-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape3-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape6-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape5-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape7-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape6-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape1-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape2-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape0-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[256]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[65]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_fully_composed": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[33]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2037]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[2048-8192]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[32]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.RELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_dtypes[dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.GELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.SILU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_successive_inputs": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[4096-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[6144-3072]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.bf16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.GELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.fp16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[13-2048-2048]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.SILU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[256-1024-4096]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[893-5120-2560]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.RELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[278-5120-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "Test requires higher memory.", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]": + "Xfail, due to SW-176845", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail due to SW-177890 and SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail due to SW-177889", + 
"unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail due to SW-177889", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "Nvidia Titan XP GPU not supported", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail due to SW-177889", + 
"unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_sequence_count": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_sequence_count": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Callable]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[False]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[True]": + "Xfail due to OOM", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Compile tests not supported on Titan-XP", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_unfused_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "Compile tests not supported on Titan-XP", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "Compile tests not supported on Titan-XP", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Compile tests not supported on Titan-XP", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-_LRScheduler]": + "Cuda OOM", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + 
"unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Same failure in Vanilla.", + 
"unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail due to SW-196379", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-196379", +}