Commit c68f5b0

feature: Add FasterTransformer DJL support (#3823)
1 parent 025c2fc commit c68f5b0

File tree

6 files changed (+106, -29 lines)

doc/frameworks/djl/sagemaker.djl_inference.rst (+8)

@@ -26,6 +26,14 @@ HuggingFaceAccelerateModel
     :undoc-members:
     :show-inheritance:
 
+FasterTransformerModel
+---------------------------
+
+.. autoclass:: sagemaker.djl_inference.model.FasterTransformerModel
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 DJLPredictor
 ---------------------------
 
doc/frameworks/djl/using_djl.rst (+13, -1)

@@ -23,7 +23,7 @@ With the SageMaker Python SDK, you can use DJL Serving to host models that have
 These can either be models you have trained/fine-tuned yourself, or models available publicly from the HuggingFace Hub.
 DJL Serving in the SageMaker Python SDK supports hosting models for the popular HuggingFace NLP tasks, as well as Stable Diffusion.
 
-You can either deploy your model using DeepSpeed or HuggingFace Accelerate, or let DJL Serving determine the best backend based on your model architecture and configuration.
+You can either deploy your model using DeepSpeed, FasterTransformer, or HuggingFace Accelerate, or let DJL Serving determine the best backend based on your model architecture and configuration.
 
 .. code:: python
 

@@ -63,11 +63,23 @@ If you want to use a specific backend, then you can create an instance of the co
         number_of_partitions=2, # number of gpus to partition the model across
     )
 
+    # Create a model using the FasterTransformer backend
+
+    fastertransformer_model = FasterTransformerModel(
+        "s3://my_bucket/my_saved_model_artifacts/", # This can also be a HuggingFace Hub model id
+        "my_sagemaker_role",
+        data_type="fp16",
+        task="text-generation",
+        tensor_parallel_degree=2, # number of gpus to partition the model across
+    )
+
     # Deploy the model to an Amazon SageMaker Endpoint and get a Predictor
     deepspeed_predictor = deepspeed_model.deploy("ml.g5.12xlarge",
                                                  initial_instance_count=1)
     hf_accelerate_predictor = hf_accelerate_model.deploy("ml.g5.12xlarge",
                                                          initial_instance_count=1)
+    fastertransformer_predictor = fastertransformer_model.deploy("ml.g5.12xlarge",
+                                                                 initial_instance_count=1)
 
 Regardless of which way you choose to create your model, a ``Predictor`` object is returned. You can use this ``Predictor``
 to do inference on the endpoint hosting your DJLModel.
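
Once deployed, the new predictor is invoked like the existing ones. A minimal sketch of querying the FasterTransformer endpoint, assuming the default JSON serialization of ``DJLPredictor``; the "inputs"/"parameters" payload shape is a common DJL Serving convention shown here for illustration, not something this commit defines:

    # Sketch: send a text-generation request to the deployed endpoint.
    # Adapt the payload keys to whatever your model's handler expects.
    result = fastertransformer_predictor.predict(
        {
            "inputs": "Large language models are",
            "parameters": {"max_new_tokens": 64},  # illustrative generation option
        }
    )
    print(result)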

src/sagemaker/djl_inference/__init__.py (+1)

@@ -17,3 +17,4 @@
 from sagemaker.djl_inference.model import DJLModel  # noqa: F401
 from sagemaker.djl_inference.model import DeepSpeedModel  # noqa: F401
 from sagemaker.djl_inference.model import HuggingFaceAccelerateModel  # noqa: F401
+from sagemaker.djl_inference.model import FasterTransformerModel  # noqa: F401
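
With this re-export in place, the new class is importable from the package root alongside its siblings; a trivial check, shown for completeness:

    # All DJL backend model classes are now importable from the package root.
    from sagemaker.djl_inference import (
        DeepSpeedModel,
        FasterTransformerModel,
        HuggingFaceAccelerateModel,
    )

    print(FasterTransformerModel.__name__)  # -> FasterTransformerModel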

src/sagemaker/djl_inference/defaults.py (+9, -7)

@@ -30,17 +30,19 @@
     STABLE_DIFFUSION_MODEL_TYPE,
 }
 
-DEEPSPEED_SUPPORTED_ARCHITECTURES = {
+FASTER_TRANSFORMER_RECOMMENDED_ARCHITECTURES = {
+    "t5",
+}
+
+FASTER_TRANSFORMER_SUPPORTED_ARCHITECTURES = {
+    "bert",
+    "gpt2",
     "bloom",
     "opt",
-    "gpt_neox",
     "gptj",
+    "gpt_neox",
     "gpt_neo",
-    "gpt2",
-    "xlm-roberta",
-    "roberta",
-    "bert",
-    STABLE_DIFFUSION_MODEL_TYPE,
+    "t5",
 }
 
 ALLOWED_INSTANCE_FAMILIES = {
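
The strings in these sets correspond to the ``model_type`` field of a HuggingFace model config, which is what the engine-selection logic in model.py compares against. A small sketch of checking which set a checkpoint falls into, using the real ``transformers.AutoConfig`` API; the model id is illustrative:

    from transformers import AutoConfig

    from sagemaker.djl_inference import defaults

    # model_type is the architecture key the engine-selection sets match on.
    config = AutoConfig.from_pretrained("t5-small")  # illustrative model id
    print(config.model_type)  # -> "t5"
    print(config.model_type in defaults.FASTER_TRANSFORMER_RECOMMENDED_ARCHITECTURES)  # -> True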

src/sagemaker/djl_inference/model.py (+74, -6)

@@ -52,6 +52,7 @@ class DJLServingEngineEntryPointDefaults(Enum):
     DEEPSPEED = ("DeepSpeed", "djl_python.deepspeed")
     HUGGINGFACE_ACCELERATE = ("Python", "djl_python.huggingface")
     STABLE_DIFFUSION = ("DeepSpeed", "djl_python.stable-diffusion")
+    FASTER_TRANSFORMER = ("FasterTransformer", "djl_python.fastertransformer")
 
 
 class DJLPredictor(Predictor):

@@ -93,30 +94,34 @@ def __init__(
 def _determine_engine_for_model(model_type: str, num_partitions: int, num_heads: int):
     """Placeholder docstring"""
 
-    # Tensor Parallelism with DeepSpeed is only possible if attention heads can be split evenly
+    # Tensor Parallelism is only possible if attention heads can be split evenly
     # across devices
     if num_heads is not None and num_partitions is not None and num_heads % num_partitions:
         return HuggingFaceAccelerateModel
     if model_type in defaults.DEEPSPEED_RECOMMENDED_ARCHITECTURES:
         return DeepSpeedModel
+    if model_type in defaults.FASTER_TRANSFORMER_RECOMMENDED_ARCHITECTURES:
+        return FasterTransformerModel
     return HuggingFaceAccelerateModel
 
 
 def _validate_engine_for_model_type(cls, model_type: str, num_partitions: int, num_heads: int):
     """Placeholder docstring"""
 
     if cls == DeepSpeedModel:
-        if model_type not in defaults.DEEPSPEED_SUPPORTED_ARCHITECTURES:
-            raise ValueError(
-                f"{model_type} is not supported by DeepSpeed. "
-                f"Supported model_types are {defaults.DEEPSPEED_SUPPORTED_ARCHITECTURES}"
-            )
         if num_heads is not None and num_partitions is not None and num_heads % num_partitions:
             raise ValueError(
                 "The number of attention heads is not evenly divisible by the number of partitions."
                 "Please set the number of partitions such that the number of attention heads can be"
                 "evenly split across the partitions."
             )
+    if cls == FasterTransformerModel:
+        if model_type not in defaults.FASTER_TRANSFORMER_SUPPORTED_ARCHITECTURES:
+            raise ValueError(
+                f"The model architecture {model_type} is currently not supported by "
+                f"FasterTransformer. Please use a different engine, or use the DJLModel "
+                f"to let SageMaker pick a recommended engine for this model."
+            )
     return cls

@@ -223,6 +228,8 @@ def __new__(
             instance.engine = DJLServingEngineEntryPointDefaults.STABLE_DIFFUSION
         elif isinstance(instance, DeepSpeedModel):
             instance.engine = DJLServingEngineEntryPointDefaults.DEEPSPEED
+        elif isinstance(instance, FasterTransformerModel):
+            instance.engine = DJLServingEngineEntryPointDefaults.FASTER_TRANSFORMER
         else:
             instance.engine = DJLServingEngineEntryPointDefaults.HUGGINGFACE_ACCELERATE
         return instance

@@ -856,3 +863,64 @@ def generate_serving_properties(self, serving_properties=None) -> Dict[str, str]
         serving_properties["option.dtype"] = "auto"
         serving_properties.pop("option.load_in_8bit", None)
         return serving_properties
+
+
+class FasterTransformerModel(DJLModel):
+    """A DJL FasterTransformer SageMaker ``Model``
+
+    This can be deployed to a SageMaker ``Endpoint``.
+    """
+
+    _framework_name = "djl-fastertransformer"
+
+    def __init__(
+        self,
+        model_id: str,
+        role: str,
+        tensor_parallel_degree: Optional[int] = None,
+        **kwargs,
+    ):
+        """Initialize a FasterTransformerModel.
+
+        Args:
+            model_id (str): This is either the HuggingFace Hub model_id, or the Amazon S3
+                location containing the uncompressed model artifacts (i.e. not a tar.gz file).
+                The model artifacts are expected to be in HuggingFace pre-trained model
+                format (i.e. the model should be loadable from the HuggingFace transformers
+                from_pretrained api, and should also include tokenizer configs if applicable).
+            role (str): An AWS IAM role specified with either the name or full ARN. The Amazon
+                SageMaker training jobs and APIs that create Amazon SageMaker endpoints use
+                this role to access model artifacts. After the endpoint is created, the
+                inference code might use the IAM role, if it needs to access an AWS resource.
+            tensor_parallel_degree (int): The number of gpus to shard a single instance of the
+                model across via tensor parallelism. This should be set to greater than 1 if
+                the size of the model is larger than the memory available on a single GPU on
+                the instance. Defaults to None. If not set, no tensor parallel sharding is done.
+            **kwargs: Keyword arguments passed to the superclasses
+                :class:`~sagemaker.djl_inference.DJLModel`,
+                :class:`~sagemaker.model.FrameworkModel`, and
+                :class:`~sagemaker.model.Model`
+
+        .. tip::
+
+            You can find additional parameters for initializing this class at
+            :class:`~sagemaker.djl_inference.DJLModel`,
+            :class:`~sagemaker.model.FrameworkModel`, and
+            :class:`~sagemaker.model.Model`.
+        """
+
+        super(FasterTransformerModel, self).__init__(
+            model_id,
+            role,
+            **kwargs,
+        )
+        if self.number_of_partitions and tensor_parallel_degree:
+            logger.warning(
+                "Both number_of_partitions and tensor_parallel_degree have been set for "
+                "FasterTransformerModel. These mean the same thing for FasterTransformerModel. "
+                "Please only set tensor_parallel_degree. number_of_partitions will be ignored."
+            )
+        self.number_of_partitions = tensor_parallel_degree or self.number_of_partitions
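
Taken together, the factory and validation changes mean a ``DJLModel`` pointed at a t5-architecture checkpoint should now resolve to a ``FasterTransformerModel``. A minimal sketch, with a placeholder S3 path and role; constructing the model reads the checkpoint's config, so the artifacts must be reachable:

    from sagemaker.djl_inference import DJLModel, FasterTransformerModel
    from sagemaker.djl_inference.model import DJLServingEngineEntryPointDefaults

    # DJLModel.__new__ inspects the model config and routes t5 architectures
    # to FasterTransformer via FASTER_TRANSFORMER_RECOMMENDED_ARCHITECTURES.
    model = DJLModel(
        "s3://my_bucket/my_t5_model/",  # placeholder artifact location
        "my_sagemaker_role",            # placeholder IAM role
        number_of_partitions=2,
    )
    assert isinstance(model, FasterTransformerModel)
    assert model.engine == DJLServingEngineEntryPointDefaults.FASTER_TRANSFORMER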

tests/unit/test_djl_inference.py (+1, -15)

@@ -147,7 +147,7 @@ def test_create_model_automatic_engine_selection(mock_s3_list, mock_read_file, s
         sagemaker_session=sagemaker_session,
         number_of_partitions=4,
     )
-    assert hf_model.engine == DJLServingEngineEntryPointDefaults.HUGGINGFACE_ACCELERATE
+    assert hf_model.engine == DJLServingEngineEntryPointDefaults.FASTER_TRANSFORMER
 
     hf_model_config = {
         "model_type": "gpt2",

@@ -200,20 +200,6 @@ def test_create_deepspeed_model(mock_s3_list, mock_read_file, sagemaker_session)
     )
     assert ds_model.engine == DJLServingEngineEntryPointDefaults.DEEPSPEED
 
-    ds_model_config = {
-        "model_type": "t5",
-        "n_head": 12,
-    }
-    mock_read_file.return_value = json.dumps(ds_model_config)
-    with pytest.raises(ValueError) as invalid_model_type:
-        _ = DeepSpeedModel(
-            VALID_UNCOMPRESSED_MODEL_DATA,
-            ROLE,
-            sagemaker_session=sagemaker_session,
-            tensor_parallel_degree=1,
-        )
-    assert str(invalid_model_type.value).startswith("t5 is not supported by DeepSpeed")
-
     ds_model_config = {
         "model_type": "opt",
         "n_head": 25,
