From d771567aae6ab314794d90060ffdc23bd9a85c35 Mon Sep 17 00:00:00 2001 From: martinbrose <13284268+martinbrose@users.noreply.github.com> Date: Sun, 20 Aug 2023 19:19:13 +0100 Subject: [PATCH 1/6] Add question-answering to inference client --- docs/source/guides/inference.md | 2 +- src/huggingface_hub/inference/_client.py | 55 ++++++++++++++++++ .../inference/_generated/_async_client.py | 56 +++++++++++++++++++ src/huggingface_hub/inference/_types.py | 19 +++++++ ...ClientVCRTest.test_question_answering.yaml | 48 ++++++++++++++++ tests/test_inference_client.py | 10 ++++ 6 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 tests/cassettes/InferenceClientVCRTest.test_question_answering.yaml diff --git a/docs/source/guides/inference.md b/docs/source/guides/inference.md index 5ee7d7a114..e12f72045a 100644 --- a/docs/source/guides/inference.md +++ b/docs/source/guides/inference.md @@ -135,7 +135,7 @@ has a simple API that supports the most common tasks. Here is a list of the curr | NLP | [Conversational](https://huggingface.co/tasks/conversational) | ✅ | [`~InferenceClient.conversational`] | | | [Feature Extraction](https://huggingface.co/tasks/feature-extraction) | ✅ | [`~InferenceClient.feature_extraction`] | | | [Fill Mask](https://huggingface.co/tasks/fill-mask) | | | -| | [Question Answering](https://huggingface.co/tasks/question-answering) | | | +| | [Question Answering](https://huggingface.co/tasks/question-answering) | ✅ | [`~InferenceClient.question-answering`] | | [Sentence Similarity](https://huggingface.co/tasks/sentence-similarity) | ✅ | [`~InferenceClient.sentence_similarity`] | | | [Summarization](https://huggingface.co/tasks/summarization) | ✅ | [`~InferenceClient.summarization`] | | | [Table Question Answering](https://huggingface.co/tasks/table-question-answering) | | | diff --git a/src/huggingface_hub/inference/_client.py b/src/huggingface_hub/inference/_client.py index 15d0c4edff..536a9e645a 100644 --- 
a/src/huggingface_hub/inference/_client.py +++ b/src/huggingface_hub/inference/_client.py @@ -80,6 +80,7 @@ ConversationalOutput, ImageSegmentationOutput, ObjectDetectionOutput, + QuestionAnsweringOutput, ) from huggingface_hub.utils import ( BadRequestError, @@ -673,6 +674,60 @@ def object_detection( raise ValueError(f"Server output must be a list. Got {type(output)}: {str(output)[:200]}...") return output + def question_answering( + self, question: str, context: str, model: str, *, parameters: Optional[Dict[str, Any]] = None + ) -> List[QuestionAnsweringOutput]: + """ + Perform sentiment-analysis on the given text. + + Args: + question (`str`): + Question to be answered. + context (`str`): + The context of the question. + model (`str`): + The model to use for the text question-answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. + parameters (`Dict[str, Any]`, *optional*): + Additional parameters for the text classification task. Defaults to None. For more details about the available + parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task) + + + Returns: + `Dict`: a dictionary containing: + - answer: A string that’s the answer within the text. + - score: A float that represents how likely that the answer is correct + - start: The index (string wise) of the start of the answer within context. + - stop: The index (string wise) of the stop of the answer within context. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> output = client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") + >>> output + {'score': 0.9326562285423279, 'start': 11, 'end': 16, 'answer': 'Clara'} + ``` + """ + if model is None: + raise ValueError("You must specify a model. Task question-answering has no recommended standard model.") + + payload: Dict[str, Any] = {"question": question, "context": context} + if parameters is not None: + payload["parameters"] = parameters + response = self.post( + json=payload, + model=model, + task="question_answering", + ) + return _bytes_to_dict(response) + def sentence_similarity( self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None ) -> List[float]: diff --git a/src/huggingface_hub/inference/_generated/_async_client.py b/src/huggingface_hub/inference/_generated/_async_client.py index f9df5e2644..bf405aac13 100644 --- a/src/huggingface_hub/inference/_generated/_async_client.py +++ b/src/huggingface_hub/inference/_generated/_async_client.py @@ -64,6 +64,7 @@ ConversationalOutput, ImageSegmentationOutput, ObjectDetectionOutput, + QuestionAnsweringOutput, ) from huggingface_hub.utils import ( build_hf_headers, @@ -678,6 +679,61 @@ async def object_detection( raise ValueError(f"Server output must be a list. Got {type(output)}: {str(output)[:200]}...") return output + async def question_answering( + self, question: str, context: str, model: str, *, parameters: Optional[Dict[str, Any]] = None + ) -> List[QuestionAnsweringOutput]: + """ + Perform sentiment-analysis on the given text. + + Args: + question (`str`): + Question to be answered. + context (`str`): + The context of the question. + model (`str`): + The model to use for the text question-answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. 
+ parameters (`Dict[str, Any]`, *optional*): + Additional parameters for the text classification task. Defaults to None. For more details about the available + parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task) + + + Returns: + `Dict`: a dictionary containing: + - answer: A string that’s the answer within the text. + - score: A float that represents how likely that the answer is correct + - start: The index (string wise) of the start of the answer within context. + - stop: The index (string wise) of the stop of the answer within context. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> output = await client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") + >>> output + {'score': 0.9326562285423279, 'start': 11, 'end': 16, 'answer': 'Clara'} + ``` + """ + if model is None: + raise ValueError("You must specify a model. 
Task question-answering has no recommended standard model.") + + payload: Dict[str, Any] = {"question": question, "context": context} + if parameters is not None: + payload["parameters"] = parameters + response = await self.post( + json=payload, + model=model, + task="question_answering", + ) + return _bytes_to_dict(response) + async def sentence_similarity( self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None ) -> List[float]: diff --git a/src/huggingface_hub/inference/_types.py b/src/huggingface_hub/inference/_types.py index 3af2717aec..6e364e4a19 100644 --- a/src/huggingface_hub/inference/_types.py +++ b/src/huggingface_hub/inference/_types.py @@ -98,3 +98,22 @@ class ObjectDetectionOutput(TypedDict): label: str box: dict score: float + + +class QuestionAnsweringOutput(TypedDict): + """Dictionary containing information about a [`~InferenceClient.question_answering`] task. + + Args: + label (`str`): + The label corresponding to the detected object. + box (`dict`): + A dict response of bounding box coordinates of + the detected object: xmin, ymin, xmax, ymax + score (`float`): + The score corresponding to the detected object. 
+ """ + + score: float + start: int + end: int + answer: str diff --git a/tests/cassettes/InferenceClientVCRTest.test_question_answering.yaml b/tests/cassettes/InferenceClientVCRTest.test_question_answering.yaml new file mode 100644 index 0000000000..5e6ee3c0d6 --- /dev/null +++ b/tests/cassettes/InferenceClientVCRTest.test_question_answering.yaml @@ -0,0 +1,48 @@ +interactions: +- request: + body: '{"question": "What is the meaning of life?", "context": "42"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br + Connection: + - keep-alive + Content-Length: + - '61' + Content-Type: + - application/json + X-Amzn-Trace-Id: + - e3071dca-bb69-47b9-b9c7-f0ce58a69927 + user-agent: + - unknown/None; hf_hub/0.17.0.dev0; python/3.10.12 + method: POST + uri: https://api-inference.huggingface.co/models/deepset/roberta-base-squad2 + response: + body: + string: '{"score":1.4291124728060822e-08,"start":0,"end":2,"answer":"42"}' + headers: + Connection: + - keep-alive + Content-Length: + - '64' + Content-Type: + - application/json + Date: + - Sun, 20 Aug 2023 18:17:17 GMT + access-control-allow-credentials: + - 'true' + vary: + - Origin, Access-Control-Request-Method, Access-Control-Request-Headers + x-compute-time: + - '0.094' + x-compute-type: + - cache + x-request-id: + - vY1d3zhYMs71Bmhh1OI5N + x-sha: + - e09df911dd96d8b052d2665dfbb309e9398a9d70 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_inference_client.py b/tests/test_inference_client.py index 2fe2b9bdb9..f73c3017dd 100644 --- a/tests/test_inference_client.py +++ b/tests/test_inference_client.py @@ -171,6 +171,16 @@ def test_object_detection(self) -> None: self.assertIn("xmax", item["box"]) self.assertIn("ymax", item["box"]) + def test_question_answering(self) -> None: + model = "deepset/roberta-base-squad2" + output = self.client.question_answering(question="What is the meaning of life?", context="42", model=model) + self.assertIsInstance(output, dict) + 
self.assertGreater(len(output), 0) + self.assertIsInstance(output["score"], float) + self.assertIsInstance(output["start"], int) + self.assertIsInstance(output["end"], int) + self.assertIsInstance(output["answer"], str) + def test_sentence_similarity(self) -> None: scores = self.client.sentence_similarity( "Machine learning is so easy.", From 1cbdbff42bbc561677355ae0829a01b94eb350ff Mon Sep 17 00:00:00 2001 From: martinbrose <13284268+martinbrose@users.noreply.github.com> Date: Mon, 21 Aug 2023 14:14:38 +0100 Subject: [PATCH 2/6] Corrected docstring. --- src/huggingface_hub/inference/_client.py | 2 +- src/huggingface_hub/inference/_generated/_async_client.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/huggingface_hub/inference/_client.py b/src/huggingface_hub/inference/_client.py index 536a9e645a..9db1dec3d6 100644 --- a/src/huggingface_hub/inference/_client.py +++ b/src/huggingface_hub/inference/_client.py @@ -678,7 +678,7 @@ def question_answering( self, question: str, context: str, model: str, *, parameters: Optional[Dict[str, Any]] = None ) -> List[QuestionAnsweringOutput]: """ - Perform sentiment-analysis on the given text. + Retrieve the answer to a question from a given text. Args: question (`str`): diff --git a/src/huggingface_hub/inference/_generated/_async_client.py b/src/huggingface_hub/inference/_generated/_async_client.py index bf405aac13..ba736720fd 100644 --- a/src/huggingface_hub/inference/_generated/_async_client.py +++ b/src/huggingface_hub/inference/_generated/_async_client.py @@ -683,7 +683,7 @@ async def question_answering( self, question: str, context: str, model: str, *, parameters: Optional[Dict[str, Any]] = None ) -> List[QuestionAnsweringOutput]: """ - Perform sentiment-analysis on the given text. + Retrieve the answer to a question from a given text.
Args: question (`str`): From 186da5d52083de7ca691094df9dbdd24048a2e64 Mon Sep 17 00:00:00 2001 From: martinbrose <13284268+martinbrose@users.noreply.github.com> Date: Mon, 21 Aug 2023 16:13:10 +0100 Subject: [PATCH 3/6] Corrected typos --- src/huggingface_hub/inference/_client.py | 8 ++++---- src/huggingface_hub/inference/_generated/_async_client.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/huggingface_hub/inference/_client.py b/src/huggingface_hub/inference/_client.py index 9db1dec3d6..7657b7c47e 100644 --- a/src/huggingface_hub/inference/_client.py +++ b/src/huggingface_hub/inference/_client.py @@ -686,11 +686,11 @@ def question_answering( context (`str`): The context of the question. model (`str`): - The model to use for the text question-answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. parameters (`Dict[str, Any]`, *optional*): - Additional parameters for the text classification task. Defaults to None. For more details about the available - parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task) + Additional parameters for the question answering task. Defaults to None. 
For more details about the available + parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#question-answering-task) Returns: @@ -724,7 +724,7 @@ def question_answering( response = self.post( json=payload, model=model, - task="question_answering", + task="question-answering", ) return _bytes_to_dict(response) diff --git a/src/huggingface_hub/inference/_generated/_async_client.py b/src/huggingface_hub/inference/_generated/_async_client.py index ba736720fd..a1f6217227 100644 --- a/src/huggingface_hub/inference/_generated/_async_client.py +++ b/src/huggingface_hub/inference/_generated/_async_client.py @@ -691,11 +691,11 @@ async def question_answering( context (`str`): The context of the question. model (`str`): - The model to use for the text question-answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. parameters (`Dict[str, Any]`, *optional*): - Additional parameters for the text classification task. Defaults to None. For more details about the available - parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task) + Additional parameters for the question answering task. Defaults to None. 
For more details about the available + parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#question-answering-task) Returns: @@ -730,7 +730,7 @@ async def question_answering( response = await self.post( json=payload, model=model, - task="question_answering", + task="question-answering", ) return _bytes_to_dict(response) From 56b314cd35a01c10ae0a3deaf2bd6b3e7af24046 Mon Sep 17 00:00:00 2001 From: martinbrose <13284268+martinbrose@users.noreply.github.com> Date: Tue, 5 Sep 2023 22:03:14 +0100 Subject: [PATCH 4/6] Change in line with review points on other PRs --- src/huggingface_hub/inference/_client.py | 20 ++++--------------- .../inference/_generated/_async_client.py | 20 ++++--------------- src/huggingface_hub/inference/_types.py | 13 ++++++------ 3 files changed, 15 insertions(+), 38 deletions(-) diff --git a/src/huggingface_hub/inference/_client.py b/src/huggingface_hub/inference/_client.py index 933c614d4b..858ee199fc 100644 --- a/src/huggingface_hub/inference/_client.py +++ b/src/huggingface_hub/inference/_client.py @@ -677,8 +677,8 @@ def object_detection( return output def question_answering( - self, question: str, context: str, model: str, *, parameters: Optional[Dict[str, Any]] = None - ) -> List[QuestionAnsweringOutput]: + self, question: str, context: str, *, model: Optional[str] = None + ) -> QuestionAnsweringOutput: """ Retrieve the answer to a question from a given text. @@ -690,17 +690,9 @@ def question_answering( model (`str`): The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. - parameters (`Dict[str, Any]`, *optional*): - Additional parameters for the question answering task. Defaults to None. 
For more details about the available - parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#question-answering-task) - Returns: - `Dict`: a dictionary containing: - - answer: A string that’s the answer within the text. - - score: A float that represents how likely that the answer is correct - - start: The index (string wise) of the start of the answer within context. - - stop: The index (string wise) of the stop of the answer within context. + `Dict`: a dictionary of question answering output containing the score, start index, end index, and answer. Raises: [`InferenceTimeoutError`]: @@ -717,18 +709,14 @@ def question_answering( {'score': 0.9326562285423279, 'start': 11, 'end': 16, 'answer': 'Clara'} ``` """ - if model is None: - raise ValueError("You must specify a model. Task question-answering has no recommended standard model.") payload: Dict[str, Any] = {"question": question, "context": context} - if parameters is not None: - payload["parameters"] = parameters response = self.post( json=payload, model=model, task="question-answering", ) - return _bytes_to_dict(response) + return _bytes_to_dict(response) # type: ignore def sentence_similarity( self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None diff --git a/src/huggingface_hub/inference/_generated/_async_client.py b/src/huggingface_hub/inference/_generated/_async_client.py index 36b5ce4afe..c997ae6e9d 100644 --- a/src/huggingface_hub/inference/_generated/_async_client.py +++ b/src/huggingface_hub/inference/_generated/_async_client.py @@ -682,8 +682,8 @@ async def object_detection( return output async def question_answering( - self, question: str, context: str, model: str, *, parameters: Optional[Dict[str, Any]] = None - ) -> List[QuestionAnsweringOutput]: + self, question: str, context: str, *, model: Optional[str] = None + ) -> QuestionAnsweringOutput: """ Retrieve the answer to a question from a given text. 
@@ -695,17 +695,9 @@ async def question_answering( model (`str`): The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. - parameters (`Dict[str, Any]`, *optional*): - Additional parameters for the question answering task. Defaults to None. For more details about the available - parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#question-answering-task) - Returns: - `Dict`: a dictionary containing: - - answer: A string that’s the answer within the text. - - score: A float that represents how likely that the answer is correct - - start: The index (string wise) of the start of the answer within context. - - stop: The index (string wise) of the stop of the answer within context. + `Dict`: a dictionary of question answering output containing the score, start index, end index, and answer. Raises: [`InferenceTimeoutError`]: @@ -723,18 +715,14 @@ async def question_answering( {'score': 0.9326562285423279, 'start': 11, 'end': 16, 'answer': 'Clara'} ``` """ - if model is None: - raise ValueError("You must specify a model. 
Task question-answering has no recommended standard model.") payload: Dict[str, Any] = {"question": question, "context": context} - if parameters is not None: - payload["parameters"] = parameters response = await self.post( json=payload, model=model, task="question-answering", ) - return _bytes_to_dict(response) + return _bytes_to_dict(response) # type: ignore async def sentence_similarity( self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None diff --git a/src/huggingface_hub/inference/_types.py b/src/huggingface_hub/inference/_types.py index 6e364e4a19..0597b7d1b5 100644 --- a/src/huggingface_hub/inference/_types.py +++ b/src/huggingface_hub/inference/_types.py @@ -104,13 +104,14 @@ class QuestionAnsweringOutput(TypedDict): """Dictionary containing information about a [`~InferenceClient.question_answering`] task. Args: - label (`str`): - The label corresponding to the detected object. - box (`dict`): - A dict response of bounding box coordinates of - the detected object: xmin, ymin, xmax, ymax score (`float`): - The score corresponding to the detected object. + A float that represents how likely that the answer is correct. + start (`int`): + The index (string wise) of the start of the answer within context. + end (`int`): + The index (string wise) of the end of the answer within context. + answer (`str`): + A string that is the answer within the text. 
""" score: float From 839a3a0a9cb829806f3d2dd4db565ad77ed18c1c Mon Sep 17 00:00:00 2001 From: Lucain Date: Wed, 6 Sep 2023 18:04:50 +0200 Subject: [PATCH 5/6] Apply suggestions from code review --- docs/source/en/guides/inference.md | 2 +- src/huggingface_hub/inference/_client.py | 3 +-- src/huggingface_hub/inference/_generated/_async_client.py | 3 +-- tests/test_inference_client.py | 2 +- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/source/en/guides/inference.md b/docs/source/en/guides/inference.md index 44b367dd86..7e766a44a8 100644 --- a/docs/source/en/guides/inference.md +++ b/docs/source/en/guides/inference.md @@ -135,7 +135,7 @@ has a simple API that supports the most common tasks. Here is a list of the curr | NLP | [Conversational](https://huggingface.co/tasks/conversational) | ✅ | [`~InferenceClient.conversational`] | | | [Feature Extraction](https://huggingface.co/tasks/feature-extraction) | ✅ | [`~InferenceClient.feature_extraction`] | | | [Fill Mask](https://huggingface.co/tasks/fill-mask) | | | -| | [Question Answering](https://huggingface.co/tasks/question-answering) | ✅ | [`~InferenceClient.question-answering`] +| | [Question Answering](https://huggingface.co/tasks/question-answering) | ✅ | [`~InferenceClient.question_answering`] | | [Sentence Similarity](https://huggingface.co/tasks/sentence-similarity) | ✅ | [`~InferenceClient.sentence_similarity`] | | | [Summarization](https://huggingface.co/tasks/summarization) | ✅ | [`~InferenceClient.summarization`] | | | [Table Question Answering](https://huggingface.co/tasks/table-question-answering) | | | diff --git a/src/huggingface_hub/inference/_client.py b/src/huggingface_hub/inference/_client.py index f7217862a3..bae967b126 100644 --- a/src/huggingface_hub/inference/_client.py +++ b/src/huggingface_hub/inference/_client.py @@ -705,8 +705,7 @@ def question_answering( ```py >>> from huggingface_hub import InferenceClient >>> client = InferenceClient() - >>> output = 
client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") - >>> output + >>> client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") {'score': 0.9326562285423279, 'start': 11, 'end': 16, 'answer': 'Clara'} ``` """ diff --git a/src/huggingface_hub/inference/_generated/_async_client.py b/src/huggingface_hub/inference/_generated/_async_client.py index 0f9df19470..777dd26bc3 100644 --- a/src/huggingface_hub/inference/_generated/_async_client.py +++ b/src/huggingface_hub/inference/_generated/_async_client.py @@ -711,8 +711,7 @@ async def question_answering( # Must be run in an async context >>> from huggingface_hub import AsyncInferenceClient >>> client = AsyncInferenceClient() - >>> output = await client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") - >>> output + >>> await client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") {'score': 0.9326562285423279, 'start': 11, 'end': 16, 'answer': 'Clara'} ``` """ diff --git a/tests/test_inference_client.py b/tests/test_inference_client.py index a12df250df..52509eba40 100644 --- a/tests/test_inference_client.py +++ b/tests/test_inference_client.py @@ -179,7 +179,7 @@ def test_question_answering(self) -> None: self.assertIsInstance(output["score"], float) self.assertIsInstance(output["start"], int) self.assertIsInstance(output["end"], int) - self.assertIsInstance(output["answer"], str) + self.assertEqual(output["answer"], "Clara") def test_sentence_similarity(self) -> None: scores = self.client.sentence_similarity( From 20e1ba45025333495f786b816ccc8231a6f81e8d Mon Sep 17 00:00:00 2001 From: Lucain Pouget Date: Wed, 6 Sep 2023 18:25:23 +0200 Subject: [PATCH 6/6] fix --- tests/test_inference_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_inference_client.py b/tests/test_inference_client.py 
index 52509eba40..61dc5852eb 100644 --- a/tests/test_inference_client.py +++ b/tests/test_inference_client.py @@ -179,7 +179,7 @@ def test_question_answering(self) -> None: self.assertIsInstance(output["score"], float) self.assertIsInstance(output["start"], int) self.assertIsInstance(output["end"], int) - self.assertEqual(output["answer"], "Clara") + self.assertEqual(output["answer"], "42") def test_sentence_similarity(self) -> None: scores = self.client.sentence_similarity(