Fix post_image_query Endpoint #11

Merged Jul 21, 2023 (7 commits)

Changes from 1 commit
app/api/endpoints/image_queries.py (24 changes: 6 additions & 18 deletions)
@@ -3,12 +3,11 @@
from typing import Optional

import numpy as np
from fastapi import APIRouter, Depends, Query
from fastapi import APIRouter, Depends, Query, Request
from model import ImageQuery
from PIL import Image

from app.core.utils import get_groundlight_sdk_instance, get_motion_detector_instance, prefixed_ksuid
from app.schemas.schemas import ImageQueryCreate

logger = logging.getLogger(__name__)

@@ -18,19 +17,13 @@

@router.post("", response_model=ImageQuery)
async def post_image_query(
props: ImageQueryCreate = Depends(ImageQueryCreate),
detector_id: str,
wait: float = None,
Reviewer comment (Member) on lines +36 to +37:
Seems like we could still benefit from pydantic modeling here, right? Check that wait is actually a float not a string.
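A minimal sketch of the kind of validation the comment points at (hypothetical, not part of this PR): FastAPI already runs pydantic validation on typed query parameters, so declaring wait as an Optional[float] makes a non-numeric value such as ?wait=abc fail with a 422 before the handler runs.

from typing import Optional

from fastapi import FastAPI, Query

app = FastAPI()

# Hypothetical route path, for illustration only: a typed `wait` query parameter is
# coerced and validated by pydantic, so ?wait=abc is rejected with a 422 response.
@app.post("/image-queries")
async def post_image_query(detector_id: str, wait: Optional[float] = Query(None, ge=0)):
    return {"detector_id": detector_id, "wait": wait}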

request: Request = None,
gl: Depends = Depends(get_groundlight_sdk_instance),
motion_detector: Depends = Depends(get_motion_detector_instance),
):
"""
Submit an image query to the detector.
NOTE: For now motion detection assumes that images are submitted to the
same detector. If the client sends the same image to multiple detectors
we would incorrectly flag no motion detected for the second detector.
"""
image = props.image
detector_id = props.detector_id
wait_time = props.wait
image = await request.body()
img = Image.open(BytesIO(image))
img_numpy = np.array(img)

@@ -43,19 +36,14 @@ async def post_image_query(
)

if motion_detected or iq_response_is_improvable:
Reviewer comment (Member):
I don't remember the iq_response_is_improvable logic. The name is confusing, and I don't think the logic is correct. We shouldn't make motion-detection decisions based on the confidence of the server response.
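An illustrative sketch of the simpler flow this comment suggests (not part of this PR): escalate to the cloud only when the motion detector fires, regardless of how confident the cached answer was.

# Sketch only: branch purely on the motion detector's output.
if motion_detected:
    image_query = gl.submit_image_query(detector=detector_id, image=image, wait=wait)
    motion_detector.image_query_response = image_query
    return image_query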

image_query = gl.submit_image_query(detector=detector_id, image=image, wait=wait_time)

# Store the cloud's response so that if the next image has no motion, we will return
# the same response.

Reviewer comment (Member): Why remove this comment?

image_query = gl.submit_image_query(detector=detector_id, image=image, wait=wait)
motion_detector.image_query_response = image_query
logger.debug("Motion detected")
return image_query

logger.debug("No motion detected")

new_image_query = ImageQuery(**motion_detector.image_query_response.dict())
new_image_query.id = prefixed_ksuid(prefix="iqe_")

return new_image_query


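For context, a hedged sketch of how a client might call the reworked endpoint, sending raw image bytes in the request body instead of a base64-encoded query parameter. The /image-queries path and content type are assumptions; the port and detector ID come from the tests further down.

import requests

# Read raw JPEG bytes; no base64 encoding is needed with the new endpoint.
with open("test/assets/dog.jpeg", "rb") as f:
    image_bytes = f.read()

response = requests.post(
    "http://localhost:6717/image-queries",  # path is an assumption
    params={"detector_id": "det_2SagpFUrs83cbMZsap5hZzRjZw4", "wait": 10.0},
    data=image_bytes,
    headers={"Content-Type": "image/jpeg"},
)
print(response.json())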
app/core/utils.py (19 changes: 19 additions & 0 deletions)
@@ -1,5 +1,8 @@
from io import BytesIO

import ksuid
from fastapi import Request
from PIL import Image


def get_groundlight_sdk_instance(request: Request):
@@ -33,3 +36,19 @@ def prefixed_ksuid(prefix: str = None) -> str:
k = ksuid.KsuidMs()
out = f"{prefix}{k}"
return out


def pil_image_to_bytes(img: Image.Image, format: str = "JPEG") -> bytes:
"""
Convert a PIL Image object to raw bytes.

Args:
img (Image.Image): The PIL Image object.
format (str, optional): The image format. Defaults to "JPEG".

Returns:
bytes: The raw bytes of the image.
"""
with BytesIO() as buffer:
img.save(buffer, format=format)
return buffer.getvalue()
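A quick usage sketch for the new helper, assuming the test image that the tests below rely on:

from PIL import Image

from app.core.utils import pil_image_to_bytes

# Convert a PIL image to raw JPEG bytes, e.g. before handing it to the SDK.
img = Image.open("test/assets/dog.jpeg")
jpeg_bytes = pil_image_to_bytes(img=img, format="JPEG")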
app/schemas/schemas.py (46 changes: 1 addition & 45 deletions)
@@ -1,8 +1,6 @@
import base64
import binascii
from typing import Optional

from pydantic import BaseModel, Field, confloat, validator
from pydantic import BaseModel, Field, confloat


class DetectorCreate(BaseModel):
@@ -15,45 +13,3 @@ class DetectorCreate(BaseModel):
),
)
pipeline_config: Optional[str] = Field(None, description="Pipeline config")


class ImageQueryCreate(BaseModel):
"""
NOTE: For the `image` field, types bytes, BytesIO, BufferedReader, Image.Image
and numpy.ndarray are not JSON compatible. For now we are only supporting
str type although the Groundlight SDK accepts all the above.
Reference: https://fastapi.tiangolo.com/tutorial/encoder/
"""

detector_id: str = Field(description="Detector ID")
image: str = Field(
description="Image to submit to the detector. The image is expected to be a base64 encoded string."
)
wait: Optional[float] = Field(None, description="How long to wait for a confident response (seconds)")

@validator("image")
@classmethod
def validate_image(cls, value):
return cls._sanitize_image_input(image=value)

@classmethod
def _sanitize_image_input(cls, image: str) -> bytes:
"""Sanitizes the image input to be a bytes object.

Args:
image str: Image input assumed to be a base64 encoded string.

Raises:
ValueError: In case the image type is not supported.

Returns:
bytes: Image bytes.
"""
if isinstance(image, str):
# The image is a base64 encoded string, so decode it
try:
return base64.b64decode(image)
except binascii.Error as e:
raise ValueError(f"Invalid base64 string: {e}")

raise ValueError(f"Unsupported input image type: {type(image)}")
test/api/test_image_queries.py (52 changes: 20 additions & 32 deletions)
@@ -1,50 +1,38 @@
import base64
import urllib.parse
from io import BytesIO

import pytest
from groundlight import Groundlight
from model import Detector
from PIL import Image

from app.api.api import DETECTORS, IMAGE_QUERIES
from app.api.naming import full_path
from app.core.utils import pil_image_to_bytes
from app.main import app

from ..conftest import TestClient

client = TestClient(app)

# Detector ID associated with the detector with parameters
# name="edge_testing_det",
# query="Is there a dog in the image?",
# confidence_threshold=0.9
DETECTOR_ID = "det_2SagpFUrs83cbMZsap5hZzRjZw4"


@pytest.fixture
def detector_id():
url = full_path(DETECTORS) + f"/{DETECTOR_ID}"
response = client.get(url).json()
return response["id"]

@pytest.fixture(name="gl")
def fixture_gl() -> Groundlight:
"""Creates a Groundlight client object"""
return Groundlight(endpoint="http://localhost:6717")

@pytest.fixture
def detector(gl: Groundlight) -> Detector:
return gl.get_detector(id=DETECTOR_ID)


def test_post_image_queries(detector_id):
def test_post_image_queries(gl: Groundlight, detector: Detector):
"""
NOTE: We need to encode the image as a base64 string since bytes are not
JSON-serializable.
Tests that submitting an image query using the edge server proceeds
without failure.
"""
image = Image.open("test/assets/dog.jpeg")
byte_array = BytesIO()
image.save(byte_array, format="JPEG")

image_encoding = base64.b64encode(byte_array.getvalue()).decode()
image_encoding = urllib.parse.quote_plus(image_encoding)

url = full_path(IMAGE_QUERIES) + f"?detector_id={detector_id}&image={image_encoding}&wait=10"

response = client.post(url).json()

assert "id" in response
assert "detector_id" in response
assert "query" in response
assert "created_at" in response
assert "type" in response
assert "result" in response
assert "result_type" in response
assert response["detector_id"] == detector_id
image_bytes = pil_image_to_bytes(img=image)
gl.submit_image_query(detector=detector.id, image=image_bytes, wait=10.0)