diff --git a/roboflow/__init__.py b/roboflow/__init__.py
index 38da02d2..3b12b5b5 100644
--- a/roboflow/__init__.py
+++ b/roboflow/__init__.py
@@ -13,7 +13,7 @@
 from roboflow.models import CLIPModel, GazeModel
 from roboflow.util.general import write_line
 
-__version__ = "1.1.8"
+__version__ = "1.1.9"
 
 
 def check_key(api_key, model, notebook, num_retries=0):
diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py
index 64fce042..c7fc946a 100644
--- a/roboflow/models/inference.py
+++ b/roboflow/models/inference.py
@@ -239,7 +239,6 @@ def predict_video(
         signed_url = video_path
 
         url = urljoin(API_URL, "/videoinfer/?api_key=" + self.__api_key)
-
         if model_class in ("CLIPModel", "GazeModel"):
             if model_class == "CLIPModel":
                 model = "clip"
@@ -257,6 +256,14 @@
                 ],
             }
         ]
+        else:
+            models = [
+                {
+                    "model_id": self.dataset_id,
+                    "model_version": self.version,
+                    "inference_type": self.type,
+                }
+            ]
 
         for model in additional_models:
             models.append(SUPPORTED_ADDITIONAL_MODELS[model])
@@ -308,7 +315,6 @@ def poll_for_video_results(self, job_id: str = None) -> dict:
         url = urljoin(
             API_URL, "/videoinfer/?api_key=" + self.__api_key + "&job_id=" + self.job_id
         )
-
         try:
             response = requests.get(url, headers={"Content-Type": "application/json"})
         except Exception as e:
@@ -316,20 +322,21 @@
 
         if not response.ok:
             raise Exception(f"Error getting video inference results: {response.text}")
-
         data = response.json()
+        if "status" not in data:
+            return {}  # No status available
+        if data.get("status") > 1:
+            return data  # Error
+        elif data.get("status") == 1:
+            return {}  # Still running
+        else:  # done
+            output_signed_url = data["output_signed_url"]
+            inference_data = requests.get(
+                output_signed_url, headers={"Content-Type": "application/json"}
+            )
-        if data.get("status") != 0:
-            return {}
-
-        output_signed_url = data["output_signed_url"]
-
-        inference_data = requests.get(
-            output_signed_url, headers={"Content-Type": "application/json"}
-        )
-
-        # frame_offset and model name are top-level keys
-        return inference_data.json()
+            # frame_offset and model name are top-level keys
+            return inference_data.json()
 
     def poll_until_video_results(self, job_id) -> dict:
         """
@@ -357,14 +364,11 @@ def poll_until_video_results(self, job_id) -> dict:
             job_id = self.job_id
 
         attempts = 0
-
+        print(f"Checking for video inference results for job {job_id} every 60s")
         while True:
+            time.sleep(60)
             print(f"({attempts * 60}s): Checking for inference results")
-            response = self.poll_for_video_results()
-
-            time.sleep(60)
-
-            attempts += 1
 
             if response != {}: