ibm[patch]: add async tests, add tokenize support (langchain-ai#18898)
- **Description:** Add async tests and tokenize support.
- **Dependencies:** [ibm-watsonx-ai](https://pypi.org/project/ibm-watsonx-ai/)
- **Tag maintainer:**

Please make sure your PR passes linting and testing before submitting. Run `make format`, `make lint` and `make test` to check this locally -> ✅
Please make sure the integration_tests pass locally -> ✅

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
2 people authored and Dave Bechberger committed Mar 29, 2024
1 parent f8fc792 commit 474a118
Showing 4 changed files with 79 additions and 39 deletions.
7 changes: 7 additions & 0 deletions libs/partners/ibm/langchain_ibm/llms.py
@@ -419,3 +419,10 @@ def _stream(
             if run_manager:
                 run_manager.on_llm_new_token(chunk.text, chunk=chunk)
             yield chunk
+
+    def get_num_tokens(self, text: str) -> int:
+        response = self.watsonx_model.tokenize(text, return_tokens=False)
+        return response["result"]["token_count"]
+
+    def get_token_ids(self, text: str) -> List[int]:
+        raise NotImplementedError("API does not support returning token ids.")
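
Note on the new methods: `get_num_tokens` delegates counting to the watsonx.ai service via `tokenize`, so each call is a round trip to the API rather than a local computation, and `get_token_ids` is deliberately unsupported because the API reports counts, not token ids. A minimal usage sketch (assuming `WATSONX_APIKEY` is exported and a valid project ID is set in `WATSONX_PROJECT_ID`; the prompt is illustrative):

```python
import os

from langchain_ibm import WatsonxLLM

# The API key is picked up from the WATSONX_APIKEY environment variable.
watsonxllm = WatsonxLLM(
    model_id="google/flan-ul2",
    url="https://us-south.ml.cloud.ibm.com",
    project_id=os.environ["WATSONX_PROJECT_ID"],
)

# Server-side tokenization: one API call per invocation.
num_tokens = watsonxllm.get_num_tokens("What color is the sunflower?")
print(num_tokens)

# watsonxllm.get_token_ids("...") raises NotImplementedError:
# the service does not return token ids.
```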
55 changes: 27 additions & 28 deletions libs/partners/ibm/poetry.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions libs/partners/ibm/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-ibm"
-version = "0.1.1"
+version = "0.1.2"
 description = "An integration package connecting IBM watsonx.ai and LangChain"
 authors = ["IBM"]
 readme = "README.md"
@@ -12,7 +12,7 @@ license = "MIT"
 
 [tool.poetry.dependencies]
 python = ">=3.10,<4.0"
-langchain-core = "^0.1.27"
+langchain-core = "^0.1.29"
 ibm-watsonx-ai = "^0.2.0"
 
 [tool.poetry.group.test]
52 changes: 43 additions & 9 deletions libs/partners/ibm/tests/integration_tests/test_llms.py
@@ -17,11 +17,12 @@
 
 WX_APIKEY = os.environ.get("WATSONX_APIKEY", "")
 WX_PROJECT_ID = os.environ.get("WATSONX_PROJECT_ID", "")
+MODEL_ID = "google/flan-ul2"
 
 
 def test_watsonxllm_invoke() -> None:
     watsonxllm = WatsonxLLM(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         url="https://us-south.ml.cloud.ibm.com",
         project_id=WX_PROJECT_ID,
     )
@@ -39,7 +40,7 @@ def test_watsonxllm_invoke_with_params() -> None:
     }
 
     watsonxllm = WatsonxLLM(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         url="https://us-south.ml.cloud.ibm.com",
         project_id=WX_PROJECT_ID,
         params=parameters,
@@ -52,7 +53,7 @@ def test_watsonxllm_invoke_with_params() -> None:
 
 def test_watsonxllm_generate() -> None:
     watsonxllm = WatsonxLLM(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         url="https://us-south.ml.cloud.ibm.com",
         project_id=WX_PROJECT_ID,
     )
@@ -66,7 +67,7 @@ def test_watsonxllm_generate() -> None:
 
 def test_watsonxllm_generate_with_multiple_prompts() -> None:
     watsonxllm = WatsonxLLM(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         url="https://us-south.ml.cloud.ibm.com",
         project_id=WX_PROJECT_ID,
     )
@@ -82,7 +83,7 @@ def test_watsonxllm_generate_with_multiple_prompts() -> None:
 
 def test_watsonxllm_generate_stream() -> None:
     watsonxllm = WatsonxLLM(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         url="https://us-south.ml.cloud.ibm.com",
         project_id=WX_PROJECT_ID,
     )
@@ -96,7 +97,7 @@ def test_watsonxllm_generate_stream() -> None:
 
 def test_watsonxllm_stream() -> None:
     watsonxllm = WatsonxLLM(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         url="https://us-south.ml.cloud.ibm.com",
         project_id=WX_PROJECT_ID,
     )
@@ -119,7 +120,7 @@ def test_watsonxllm_stream() -> None:
 
 def test_watsonxllm_invoke_from_wx_model() -> None:
     model = Model(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         credentials={
             "apikey": WX_APIKEY,
             "url": "https://us-south.ml.cloud.ibm.com",
@@ -135,7 +136,7 @@ def test_watsonxllm_invoke_from_wx_model() -> None:
 
 def test_watsonxllm_invoke_from_wx_model_inference() -> None:
     model = ModelInference(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         credentials={
             "apikey": WX_APIKEY,
             "url": "https://us-south.ml.cloud.ibm.com",
@@ -159,7 +160,7 @@ def test_watsonxllm_invoke_from_wx_model_inference_with_params() -> None:
         GenTextParamsMetaNames.TOP_P: 1,
     }
     model = ModelInference(
-        model_id="google/flan-ul2",
+        model_id=MODEL_ID,
         credentials={
             "apikey": WX_APIKEY,
             "url": "https://us-south.ml.cloud.ibm.com",
@@ -197,3 +198,36 @@ def test_watsonxllm_invoke_from_wx_model_inference_with_params_as_enum() -> None:
     print(f"\nResponse: {response}")
     assert isinstance(response, str)
     assert len(response) > 0
+
+
+async def test_watsonx_ainvoke() -> None:
+    watsonxllm = WatsonxLLM(
+        model_id=MODEL_ID,
+        url="https://us-south.ml.cloud.ibm.com",
+        project_id=WX_PROJECT_ID,
+    )
+    response = await watsonxllm.ainvoke("What color sunflower is?")
+    assert isinstance(response, str)
+
+
+async def test_watsonx_agenerate() -> None:
+    watsonxllm = WatsonxLLM(
+        model_id=MODEL_ID,
+        url="https://us-south.ml.cloud.ibm.com",
+        project_id=WX_PROJECT_ID,
+    )
+    response = await watsonxllm.agenerate(
+        ["What color sunflower is?", "What color turtle is?"]
+    )
+    assert len(response.generations) > 0
+    assert response.llm_output["token_usage"]["generated_token_count"] != 0  # type: ignore
+
+
+def test_get_num_tokens() -> None:
+    watsonxllm = WatsonxLLM(
+        model_id=MODEL_ID,
+        url="https://us-south.ml.cloud.ibm.com",
+        project_id=WX_PROJECT_ID,
+    )
+    num_tokens = watsonxllm.get_num_tokens("What color sunflower is?")
+    assert num_tokens > 0
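
The async tests above exercise `ainvoke` and `agenerate` end to end; since this patch adds tests rather than native async implementations, these calls presumably go through langchain-core's default async fallbacks. A minimal sketch of the same calls from application code, under the same environment assumptions as the tests:

```python
import asyncio
import os

from langchain_ibm import WatsonxLLM


async def main() -> None:
    watsonxllm = WatsonxLLM(
        model_id="google/flan-ul2",
        url="https://us-south.ml.cloud.ibm.com",
        project_id=os.environ["WATSONX_PROJECT_ID"],
    )

    # Single prompt -> str
    print(await watsonxllm.ainvoke("What color is the sunflower?"))

    # Multiple prompts -> LLMResult; llm_output carries watsonx token usage
    result = await watsonxllm.agenerate(
        ["What color is the sunflower?", "What color is the turtle?"]
    )
    print(result.llm_output["token_usage"]["generated_token_count"])


asyncio.run(main())
```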
