28 changes: 14 additions & 14 deletions scope3ai/tracers/huggingface/vision/image_classification.py
@@ -1,17 +1,19 @@
import io
import time
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union, List
from PIL import Image
from typing import Any, Callable, List, Optional, Union

from aiohttp import ClientResponse
from huggingface_hub import (
InferenceClient,
AsyncInferenceClient,
ImageClassificationOutputElement,
InferenceClient,
) # type: ignore[import-untyped]
from PIL import Image
from requests import Response

from scope3ai.api.types import Scope3AIContext, ImpactRow
from scope3ai.api.types import ImpactRow, Scope3AIContext
from scope3ai.api.typesgen import Image as RootImage
from scope3ai.api.typesgen import Task
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI
@@ -24,33 +26,31 @@

@dataclass
class ImageClassificationOutput:
elements: List[ImageClassificationOutputElement] = None
elements: Optional[List[ImageClassificationOutputElement]] = None
scope3ai: Optional[Scope3AIContext] = None


def _hugging_face_image_classification_wrapper(
timer_start: Any,
model: Any,
response: Any,
http_response: Union[ClientResponse, Response],
http_response: Optional[Union[ClientResponse, Response]],
args: Any,
kwargs: Any,
) -> ImageClassificationOutput:
input_tokens = 0
compute_time = time.perf_counter() - timer_start
input_images = []
if http_response:
compute_time = http_response.headers.get("x-compute-time")
else:
compute_time = time.perf_counter() - timer_start
compute_time = http_response.headers.get("x-compute-time") or compute_time
try:
image_param = args[0] if len(args) > 0 else kwargs["image"]
if type(image_param) is str:
input_image = Image.open(args[0] if len(args) > 0 else kwargs["image"])
else:
input_image = Image.open(io.BytesIO(image_param))
input_width, input_height = input_image.size
input_images = [
("{width}x{height}".format(width=input_width, height=input_height))
]
input_images = [RootImage(root=f"{input_width}x{input_height}")]
except Exception:
pass
scope3_row = ImpactRow(
@@ -78,7 +78,7 @@ def huggingface_image_classification_wrapper(
with requests_response_capture() as responses:
response = wrapped(*args, **kwargs)
http_responses = responses.get()
if len(http_responses) > 0:
if http_responses:
http_response = http_responses[-1]
model = kwargs.get("model") or instance.get_recommended_model(
HUGGING_FACE_IMAGE_CLASSIFICATION_TASK
@@ -96,7 +96,7 @@ async def huggingface_image_classification_wrapper_async(
with aiohttp_response_capture() as responses:
response = await wrapped(*args, **kwargs)
http_responses = responses.get()
if len(http_responses) > 0:
if http_responses:
http_response = http_responses[-1]
model = kwargs.get("model") or instance.get_recommended_model(
HUGGING_FACE_IMAGE_CLASSIFICATION_TASK
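The classification wrapper now measures wall-clock time up front and only overrides it when the provider returns an `x-compute-time` header, and it reports the input image size as a typed `RootImage` value rather than a bare string. A minimal sketch of that shared pattern, written outside the library (the helper names and the plain-mapping headers are illustrative, not scope3ai API):

```python
# Illustrative sketch of the compute-time fallback and image-size capture
# pattern from the diff above; not the library's own code.
import io
import time
from typing import Mapping, Optional, Union

from PIL import Image


def resolve_compute_time(
    timer_start: float, headers: Optional[Mapping[str, str]] = None
) -> float:
    # Start from the locally measured duration, then prefer the provider's
    # "x-compute-time" header when it is present and non-empty.
    compute_time = time.perf_counter() - timer_start
    if headers:
        compute_time = float(headers.get("x-compute-time") or compute_time)
    return compute_time


def input_image_size(image_param: Union[str, bytes]) -> str:
    # Accept either a file path or raw bytes, mirroring the wrapper's handling
    # of the "image" argument, and return the "WIDTHxHEIGHT" string that the
    # tracer wraps in RootImage.
    if isinstance(image_param, str):
        image = Image.open(image_param)
    else:
        image = Image.open(io.BytesIO(image_param))
    width, height = image.size
    return f"{width}x{height}"
```

The sketch uses `isinstance` for the path check; the wrapper itself keeps the original `type(image_param) is str` comparison.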
28 changes: 13 additions & 15 deletions scope3ai/tracers/huggingface/vision/image_segmentation.py
@@ -1,18 +1,19 @@
import io
import time
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union, List
from PIL import Image
from typing import Any, Callable, List, Optional, Union

from aiohttp import ClientResponse
from huggingface_hub import (
InferenceClient,
AsyncInferenceClient,
ImageSegmentationOutputElement,
InferenceClient,
) # type: ignore[import-untyped]
from PIL import Image
from requests import Response

from scope3ai.api.types import Scope3AIContext, ImpactRow
from scope3ai.api.types import ImpactRow, Scope3AIContext
from scope3ai.api.typesgen import Image as RootImage
from scope3ai.api.typesgen import Task
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI
@@ -25,34 +26,31 @@

@dataclass
class ImageSegmentationOutput:
elements: List[ImageSegmentationOutputElement] = None
elements: Optional[List[ImageSegmentationOutputElement]] = None
scope3ai: Optional[Scope3AIContext] = None


def _hugging_face_image_segmentation_wrapper(
timer_start: Any,
model: Any,
response: Any,
http_response: Union[ClientResponse, Response],
http_response: Optional[Union[ClientResponse, Response]],
args: Any,
kwargs: Any,
) -> ImageSegmentationOutput:
input_tokens = 0
compute_time = time.perf_counter() - timer_start
if http_response:
compute_time = http_response.headers.get("x-compute-time")
else:
compute_time = time.perf_counter() - timer_start
input_images = None
compute_time = http_response.headers.get("x-compute-time") or compute_time
input_images = []
try:
image_param = args[0] if len(args) > 0 else kwargs["image"]
if type(image_param) is str:
input_image = Image.open(args[0] if len(args) > 0 else kwargs["image"])
else:
input_image = Image.open(io.BytesIO(image_param))
input_width, input_height = input_image.size
input_images = [
("{width}x{height}".format(width=input_width, height=input_height))
]
input_images = [RootImage(root=f"{input_width}x{input_height}")]
except Exception:
pass
scope3_row = ImpactRow(
@@ -78,7 +76,7 @@ def huggingface_image_segmentation_wrapper(
with requests_response_capture() as responses:
response = wrapped(*args, **kwargs)
http_responses = responses.get()
if len(http_responses) > 0:
if http_responses:
http_response = http_responses[-1]
model = kwargs.get("model") or instance.get_recommended_model(
HUGGING_FACE_IMAGE_SEGMENTATION_TASK
@@ -96,7 +94,7 @@ async def huggingface_image_segmentation_wrapper_async(
with aiohttp_response_capture() as responses:
response = await wrapped(*args, **kwargs)
http_responses = responses.get()
if len(http_responses) > 0:
if http_responses:
http_response = http_responses[-1]
model = kwargs.get("model") or instance.get_recommended_model(
HUGGING_FACE_IMAGE_SEGMENTATION_TASK
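The segmentation tracer picks up the same fixes. The typing change on the dataclass is worth calling out: a `None` default on a non-Optional `List` annotation is rejected by strict type checkers, which is what the `Optional[...]` annotation resolves. A small, self-contained illustration (the class and field names here are made up):

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class SegmentationOutputSketch:
    # Before: "elements: List[str] = None" pairs a non-optional annotation with
    # a None default, which strict type checkers flag. Annotating it Optional,
    # as the diff does, makes the None default legitimate.
    elements: Optional[List[str]] = None
    # A default_factory is the usual alternative when callers should never have
    # to handle None, at the cost of changing the "no data" sentinel to [].
    labels: List[str] = field(default_factory=list)
```

The `if http_responses:` truthiness check in both the sync and async wrappers is equivalent to the previous `len(http_responses) > 0` comparison and is the more idiomatic spelling.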
27 changes: 13 additions & 14 deletions scope3ai/tracers/huggingface/vision/object_detection.py
@@ -1,18 +1,19 @@
import io
import time
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union, List
from typing import Any, Callable, List, Optional, Union

from PIL import Image
from aiohttp import ClientResponse
from huggingface_hub import (
InferenceClient,
AsyncInferenceClient,
InferenceClient,
ObjectDetectionOutputElement,
) # type: ignore[import-untyped]
from PIL import Image
from requests import Response

from scope3ai.api.types import Scope3AIContext, ImpactRow
from scope3ai.api.types import ImpactRow, Scope3AIContext
from scope3ai.api.typesgen import Image as RootImage
from scope3ai.api.typesgen import Task
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI
@@ -25,33 +26,31 @@

@dataclass
class ObjectDetectionOutput:
elements: List[ObjectDetectionOutputElement] = None
elements: Optional[List[ObjectDetectionOutputElement]] = None
scope3ai: Optional[Scope3AIContext] = None


def _hugging_face_object_detection_wrapper(
timer_start: Any,
model: Any,
response: Any,
http_response: Union[ClientResponse, Response],
http_response: Optional[Union[ClientResponse, Response]],
args: Any,
kwargs: Any,
) -> ObjectDetectionOutput:
input_tokens = 0
compute_time = time.perf_counter() - timer_start
input_images = []
if http_response:
compute_time = http_response.headers.get("x-compute-time")
else:
compute_time = time.perf_counter() - timer_start
compute_time = http_response.headers.get("x-compute-time") or compute_time
try:
image_param = args[0] if len(args) > 0 else kwargs["image"]
if type(image_param) is str:
input_image = Image.open(args[0] if len(args) > 0 else kwargs["image"])
else:
input_image = Image.open(io.BytesIO(image_param))
input_width, input_height = input_image.size
input_images = [
("{width}x{height}".format(width=input_width, height=input_height))
]
input_images = [RootImage(root=f"{input_width}x{input_height}")]
except Exception:
pass
scope3_row = ImpactRow(
@@ -78,7 +77,7 @@ def huggingface_object_detection_wrapper(
with requests_response_capture() as responses:
response = wrapped(*args, **kwargs)
http_responses = responses.get()
if len(http_responses) > 0:
if http_responses:
http_response = http_responses[-1]
model = kwargs.get("model") or instance.get_recommended_model(
HUGGING_FACE_OBJECT_DETECTION_TASK
@@ -96,7 +95,7 @@ async def huggingface_object_detection_wrapper_async(
with aiohttp_response_capture() as responses:
response = await wrapped(*args, **kwargs)
http_responses = responses.get()
if len(http_responses) > 0:
if http_responses:
http_response = http_responses[-1]
model = kwargs.get("model") or instance.get_recommended_model(
HUGGING_FACE_OBJECT_DETECTION_TASK
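The object-detection module mirrors the other two files. From the caller's side nothing changes: the patched `InferenceClient` call is made as usual and the tracer pairs the provider's output with the Scope3AI impact context. A hypothetical usage sketch; the model id, image path, and the `Scope3AI.init()` call are assumptions for illustration, not taken from this PR:

```python
# Hypothetical usage; assumes scope3ai has been set up to patch the Hugging Face
# client as the tracers above do. Names marked "assumed" are not from this PR.
from huggingface_hub import InferenceClient
from scope3ai.lib import Scope3AI

scope3 = Scope3AI.init()  # assumed entry point for enabling the tracers

client = InferenceClient()
# The patched call runs as usual; "facebook/detr-resnet-50" is just an example model.
result = client.object_detection("street.jpg", model="facebook/detr-resnet-50")

# Per the ObjectDetectionOutput dataclass above, the wrapper exposes:
#   result.elements -> List[ObjectDetectionOutputElement]
#   result.scope3ai -> Optional[Scope3AIContext]
```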