Skip to content

Commit

Permalink
Merge pull request #203 from roboflow/fix/add-fine-tuned-models
Browse files Browse the repository at this point in the history
Add fine-tuned models
  • Loading branch information
bigbitbus authored Nov 3, 2023
2 parents 1144f66 + 3eb1e43 commit 39e838a
Show file tree
Hide file tree
Showing 2 changed files with 24 additions and 20 deletions.
2 changes: 1 addition & 1 deletion roboflow/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from roboflow.models import CLIPModel, GazeModel
from roboflow.util.general import write_line

__version__ = "1.1.8"
__version__ = "1.1.9"


def check_key(api_key, model, notebook, num_retries=0):
Expand Down
42 changes: 23 additions & 19 deletions roboflow/models/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,7 +239,6 @@ def predict_video(
signed_url = video_path

url = urljoin(API_URL, "/videoinfer/?api_key=" + self.__api_key)

if model_class in ("CLIPModel", "GazeModel"):
if model_class == "CLIPModel":
model = "clip"
Expand All @@ -257,6 +256,14 @@ def predict_video(
],
}
]
else:
models = [
{
"model_id": self.dataset_id,
"model_version": self.version,
"inference_type": self.type,
}
]

for model in additional_models:
models.append(SUPPORTED_ADDITIONAL_MODELS[model])
Expand Down Expand Up @@ -308,28 +315,28 @@ def poll_for_video_results(self, job_id: str = None) -> dict:
url = urljoin(
API_URL, "/videoinfer/?api_key=" + self.__api_key + "&job_id=" + self.job_id
)

try:
response = requests.get(url, headers={"Content-Type": "application/json"})
except Exception as e:
raise Exception(f"Error getting video inference results: {e}")

if not response.ok:
raise Exception(f"Error getting video inference results: {response.text}")

data = response.json()
if "status" not in data:
return {} # No status available
if data.get("status") > 1:
return data # Error
elif data.get("status") == 1:
return {} # Still running
else: # done
output_signed_url = data["output_signed_url"]
inference_data = requests.get(
output_signed_url, headers={"Content-Type": "application/json"}
)

if data.get("status") != 0:
return {}

output_signed_url = data["output_signed_url"]

inference_data = requests.get(
output_signed_url, headers={"Content-Type": "application/json"}
)

# frame_offset and model name are top-level keys
return inference_data.json()
# frame_offset and model name are top-level keys
return inference_data.json()

def poll_until_video_results(self, job_id) -> dict:
"""
Expand Down Expand Up @@ -357,14 +364,11 @@ def poll_until_video_results(self, job_id) -> dict:
job_id = self.job_id

attempts = 0

print(f"Checking for video inference results for job {job_id} every 60s")
while True:
time.sleep(60)
print(f"({attempts * 60}s): Checking for inference results")

response = self.poll_for_video_results()

time.sleep(60)

attempts += 1

if response != {}:
Expand Down

0 comments on commit 39e838a

Please sign in to comment.