diff --git a/docker_images/sentence_transformers/Dockerfile b/docker_images/sentence_transformers/Dockerfile
index 4ea04f7b..1622c595 100644
--- a/docker_images/sentence_transformers/Dockerfile
+++ b/docker_images/sentence_transformers/Dockerfile
@@ -31,4 +31,4 @@ ENV TRANSFORMERS_CACHE=/data
 # If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
 # kill workers all the time before they finish.
 RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
-COPY ./app /app/app
+COPY ./hf_api_sentence_transformers /app/hf_api_sentence_transformers
diff --git a/docker_images/sentence_transformers/app/__init__.py b/docker_images/sentence_transformers/app/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/docker_images/sentence_transformers/app/pipelines/__init__.py b/docker_images/sentence_transformers/app/pipelines/__init__.py
deleted file mode 100644
index 2ba1548b..00000000
--- a/docker_images/sentence_transformers/app/pipelines/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from app.pipelines.base import Pipeline, PipelineException  # isort:skip
-
-from app.pipelines.feature_extraction import FeatureExtractionPipeline
-from app.pipelines.sentence_similarity import SentenceSimilarityPipeline
diff --git a/docker_images/sentence_transformers/hf_api_sentence_transformers/__init__.py b/docker_images/sentence_transformers/hf_api_sentence_transformers/__init__.py
new file mode 100644
index 00000000..995bab10
--- /dev/null
+++ b/docker_images/sentence_transformers/hf_api_sentence_transformers/__init__.py
@@ -0,0 +1,4 @@
+from .pipelines.feature_extraction import FeatureExtractionPipeline
+from .pipelines.sentence_similarity import SentenceSimilarityPipeline
+
+__all__ = ['SentenceSimilarityPipeline', "FeatureExtractionPipeline"]
\ No newline at end of file
diff --git a/docker_images/sentence_transformers/app/main.py b/docker_images/sentence_transformers/hf_api_sentence_transformers/main.py
similarity index 98%
rename from docker_images/sentence_transformers/app/main.py
rename to docker_images/sentence_transformers/hf_api_sentence_transformers/main.py
index 8c301bfc..295da612 100644
--- a/docker_images/sentence_transformers/app/main.py
+++ b/docker_images/sentence_transformers/hf_api_sentence_transformers/main.py
@@ -4,7 +4,7 @@
 from typing import Dict, Type
 
 from api_inference_community.routes import pipeline_route, status_ok
-from app.pipelines import (
+from hf_api_sentence_transformers.pipelines import (
     FeatureExtractionPipeline,
     Pipeline,
     SentenceSimilarityPipeline,
diff --git a/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/__init__.py b/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/__init__.py
new file mode 100644
index 00000000..74be36d0
--- /dev/null
+++ b/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/__init__.py
@@ -0,0 +1,4 @@
+from hf_api_sentence_transformers.pipelines.base import Pipeline, PipelineException  # isort:skip
+
+from hf_api_sentence_transformers.pipelines.feature_extraction import FeatureExtractionPipeline
+from hf_api_sentence_transformers.pipelines.sentence_similarity import SentenceSimilarityPipeline
diff --git a/docker_images/sentence_transformers/app/pipelines/base.py b/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/base.py
similarity index 100%
rename from docker_images/sentence_transformers/app/pipelines/base.py
rename to docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/base.py
diff --git a/docker_images/sentence_transformers/app/pipelines/feature_extraction.py b/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/feature_extraction.py
similarity index 91%
rename from docker_images/sentence_transformers/app/pipelines/feature_extraction.py
rename to docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/feature_extraction.py
index bc5ed5a4..2821c646 100644
--- a/docker_images/sentence_transformers/app/pipelines/feature_extraction.py
+++ b/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/feature_extraction.py
@@ -1,7 +1,7 @@
 import os
 from typing import List
 
-from app.pipelines import Pipeline
+from hf_api_sentence_transformers.pipelines import Pipeline
 from sentence_transformers import SentenceTransformer
 
 
diff --git a/docker_images/sentence_transformers/app/pipelines/sentence_similarity.py b/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/sentence_similarity.py
similarity index 95%
rename from docker_images/sentence_transformers/app/pipelines/sentence_similarity.py
rename to docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/sentence_similarity.py
index 5829ff30..0ee3c056 100644
--- a/docker_images/sentence_transformers/app/pipelines/sentence_similarity.py
+++ b/docker_images/sentence_transformers/hf_api_sentence_transformers/pipelines/sentence_similarity.py
@@ -1,7 +1,7 @@
 import os
 from typing import Dict, List, Union
 
-from app.pipelines import Pipeline
+from hf_api_sentence_transformers.pipelines import Pipeline
 from sentence_transformers import SentenceTransformer, util
 
 
diff --git a/docker_images/sentence_transformers/requirements.txt b/docker_images/sentence_transformers/requirements.txt
index 686e5f2e..c8430288 100644
--- a/docker_images/sentence_transformers/requirements.txt
+++ b/docker_images/sentence_transformers/requirements.txt
@@ -6,4 +6,3 @@ tokenizers==0.19.1
 protobuf==3.18.3
 huggingface_hub==0.23.3
 sacremoses==0.0.53
-# dummy.
diff --git a/docker_images/sentence_transformers/setup.py b/docker_images/sentence_transformers/setup.py
new file mode 100644
index 00000000..daeaa164
--- /dev/null
+++ b/docker_images/sentence_transformers/setup.py
@@ -0,0 +1,21 @@
+from setuptools import setup, find_packages
+import os
+
+
+VERSION = "0.0.1"
+
+def parse_requirements(filename):
+    """ Load requirements from a pip requirements file """
+    with open(filename, 'r') as f:
+        return f.read().splitlines()
+
+# Specify the path to the requirements.txt file
+requirements_path = os.path.join(os.path.dirname(__file__), 'requirements.txt')
+
+
+setup(
+    name='hf_api_sentence_transformers',
+    version=VERSION,
+    packages=find_packages(),
+    install_requires=parse_requirements(requirements_path)
+)
\ No newline at end of file
diff --git a/docker_images/sentence_transformers/tests/test_api_feature_extraction.py b/docker_images/sentence_transformers/tests/test_api_feature_extraction.py
index c4362b25..6163c6f6 100644
--- a/docker_images/sentence_transformers/tests/test_api_feature_extraction.py
+++ b/docker_images/sentence_transformers/tests/test_api_feature_extraction.py
@@ -2,7 +2,7 @@
 import os
 from unittest import TestCase, skipIf
 
-from app.main import ALLOWED_TASKS
+from hf_api_sentence_transformers.main import ALLOWED_TASKS
 from starlette.testclient import TestClient
 from tests.test_api import TESTABLE_MODELS
 
@@ -18,7 +18,7 @@ def setUp(self):
         self.old_task = os.getenv("TASK")
         os.environ["MODEL_ID"] = model_id
         os.environ["TASK"] = "feature-extraction"
-        from app.main import app
+        from hf_api_sentence_transformers.main import app
 
         self.app = app
 
diff --git a/docker_images/sentence_transformers/tests/test_api_sentence_similarity.py b/docker_images/sentence_transformers/tests/test_api_sentence_similarity.py
index 98abeef8..87a700be 100644
--- a/docker_images/sentence_transformers/tests/test_api_sentence_similarity.py
+++ b/docker_images/sentence_transformers/tests/test_api_sentence_similarity.py
@@ -2,7 +2,7 @@
 import os
 from unittest import TestCase, skipIf
 
-from app.main import ALLOWED_TASKS
+from hf_api_sentence_transformers.main import ALLOWED_TASKS
 from parameterized import parameterized_class
 from starlette.testclient import TestClient
 from tests.test_api import TESTABLE_MODELS
@@ -21,7 +21,7 @@ def setUp(self):
         self.old_task = os.getenv("TASK")
         os.environ["MODEL_ID"] = self.model_id
         os.environ["TASK"] = "sentence-similarity"
-        from app.main import app
+        from hf_api_sentence_transformers.main import app
 
         self.app = app
 
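A minimal usage sketch of the renamed package (not part of the diff): it assumes the package was installed with "pip install ." from docker_images/sentence_transformers using the new setup.py, and that the pipeline keeps its existing model_id constructor and dict-shaped input; the model id and sentences below are illustrative only.

# Hypothetical usage after `pip install .`; the model id and inputs are
# examples, not taken from the diff above.
from hf_api_sentence_transformers import SentenceSimilarityPipeline

pipeline = SentenceSimilarityPipeline("sentence-transformers/all-MiniLM-L6-v2")
scores = pipeline(
    {
        "source_sentence": "A happy dog",
        "sentences": ["A playful puppy", "A quarterly earnings report"],
    }
)
print(scores)  # one similarity score per candidate sentence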