1 | | -import unittest |
| 1 | +import re |
| 2 | +import sys |
| 3 | +from contextlib import nullcontext |
| 4 | +from pathlib import Path |
| 5 | +from typing import cast |
2 | 6 |
3 | | -from parameterized import parameterized |
| 7 | +import pytest |
4 | 8 |
5 | 9 | from tensorrt_llm import LLM |
6 | 10 | from tensorrt_llm.llmapi import KvCacheConfig |
7 | 11 | from tensorrt_llm.sampling_params import SamplingParams |
8 | 12 |
9 | 13 | # isort: off |
10 | | -from utils.util import unittest_name_func, similar |
| 14 | +from utils.util import similar |
11 | 15 | from utils.llm_data import llm_models_root |
12 | 16 | # isort: on |
13 | 17 |
| 18 | +from llmapi.apps.openai_server import RemoteOpenAIServer |
14 | 19 |
15 | | -class TestOutOfTree(unittest.TestCase): |
16 | 20 |
17 | | - @parameterized.expand([False, True], name_func=unittest_name_func) |
18 | | - def test_llm_api(self, import_oot_code: bool): |
19 | | - if import_oot_code: |
20 | | - # Import out-of-tree modeling code for OPTForCausalLM |
21 | | - import os |
22 | | - import sys |
23 | | - sys.path.append( |
24 | | - os.path.join( |
25 | | - os.path.dirname(__file__), |
26 | | - '../../../../examples/llm-api/out_of_tree_example')) |
27 | | - import modeling_opt # noqa |
| 21 | +class TestOutOfTree: |
28 | 22 |
29 | | - model_dir = str(llm_models_root() / "opt-125m") |
30 | | - kv_cache_config = KvCacheConfig(free_gpu_memory_fraction=0.4) |
| 23 | + @pytest.fixture |
| 24 | + @staticmethod |
| 25 | + def oot_path() -> Path: |
| 26 | + return Path( |
| 27 | + __file__ |
| 28 | + ).parent / ".." / ".." / ".." / ".." / "examples" / "llm-api" / "out_of_tree_example" |
31 | 29 |
32 | | - if not import_oot_code: |
33 | | - with self.assertRaises(RuntimeError): |
34 | | - # estimate_max_kv_cache_tokens will create a request of max_num_tokens for forward. |
35 | | - # Default 8192 will exceed the max length of absolute positional embedding in OPT, leading to out of range indexing. |
36 | | - llm = LLM(model=model_dir, |
37 | | - kv_cache_config=kv_cache_config, |
38 | | - max_num_tokens=2048) |
39 | | - return |
40 | | - |
41 | | - llm = LLM(model=model_dir, |
42 | | - kv_cache_config=kv_cache_config, |
43 | | - max_num_tokens=2048, |
44 | | - disable_overlap_scheduler=True) |
45 | | - |
46 | | - prompts = [ |
| 30 | + @pytest.fixture |
| 31 | + @staticmethod |
| 32 | + def model_dir() -> Path: |
| 33 | + models_root = llm_models_root() |
| 34 | + assert models_root is not None |
| 35 | + return models_root / "opt-125m" |
| 36 | + |
| 37 | + @pytest.fixture |
| 38 | + @staticmethod |
| 39 | + def prompts() -> list[str]: |
| 40 | + return [ |
47 | 41 | "Hello, my name is", |
48 | 42 | "The president of the United States is", |
49 | 43 | "The capital of France is", |
50 | 44 | "The future of AI is", |
51 | 45 | ] |
52 | 46 |
53 | | - references = [ |
| 47 | + @pytest.fixture |
| 48 | + @staticmethod |
| 49 | + def references() -> list[str]: |
| 50 | + return [ |
54 | 51 | " J.C. and I am a student at", |
55 | 52 | " not a racist. He is a racist.\n", |
56 | 53 | " the capital of the French Republic.\n\nThe", |
57 | 54 | " in the hands of the people.\n\nThe", |
58 | 55 | ] |
59 | 56 |
60 | | - sampling_params = SamplingParams(max_tokens=10) |
61 | | - with llm: |
62 | | - outputs = llm.generate(prompts, sampling_params=sampling_params) |
| 57 | + @pytest.fixture |
| 58 | + @staticmethod |
| 59 | + def sampling_params() -> SamplingParams: |
| 60 | + return SamplingParams(max_tokens=10) |
| 61 | + |
| 62 | + @pytest.fixture |
| 63 | + @staticmethod |
| 64 | + def max_num_tokens() -> int: |
| 65 | + # estimate_max_kv_cache_tokens will create a request of max_num_tokens for forward. |
| 66 | + # Default 8192 will exceed the max length of absolute positional embedding in OPT, leading to out of range indexing. |
| 67 | + return 2048 |
| 68 | + |
| 69 | + @pytest.mark.parametrize("import_oot_code", [False, True]) |
| 70 | + def test_llm_api( |
| 71 | + self, |
| 72 | + import_oot_code: bool, |
| 73 | + oot_path: Path, |
| 74 | + model_dir: Path, |
| 75 | + prompts: list[str], |
| 76 | + references: list[str], |
| 77 | + sampling_params: SamplingParams, |
| 78 | + max_num_tokens: int, |
| 79 | + ): |
| 80 | + if import_oot_code: |
| 81 | + # Import out-of-tree modeling code for OPTForCausalLM |
| 82 | + sys.path.append(str(oot_path)) |
| 83 | + import modeling_opt # noqa |
| 84 | + |
| 85 | + with (nullcontext() if import_oot_code else |
| 86 | + pytest.raises(RuntimeError, |
| 87 | + match=".*Executor worker returned error.*")) as ctx: |
| 88 | + with LLM( |
| 89 | + model=str(model_dir), |
| 90 | + kv_cache_config=KvCacheConfig(free_gpu_memory_fraction=0.4), |
| 91 | + max_num_tokens=max_num_tokens, |
| 92 | + ) as llm: |
| 93 | + outputs = llm.generate(prompts, sampling_params=sampling_params) |
| 94 | + |
| 95 | + for output, ref in zip(outputs, references): |
| 96 | + assert similar(output.outputs[0].text, ref) |
| 97 | + |
| 98 | + if not import_oot_code: |
| 99 | + exc_val = cast(pytest.ExceptionInfo, ctx).value |
| 100 | + assert re.match( |
| 101 | + ".*Unknown architecture for AutoModelForCausalLM: OPTForCausalLM.*", |
| 102 | + str(exc_val.__cause__), |
| 103 | + ) is not None |
| 104 | + |
| 105 | + @pytest.mark.parametrize("import_oot_code", [False, True]) |
| 106 | + def test_serve( |
| 107 | + self, |
| 108 | + import_oot_code: bool, |
| 109 | + oot_path: Path, |
| 110 | + model_dir: Path, |
| 111 | + prompts: list[str], |
| 112 | + references: list[str], |
| 113 | + sampling_params: SamplingParams, |
| 114 | + max_num_tokens: int, |
| 115 | + ):
| 116 | + with (nullcontext() |
| 117 | + if import_oot_code else pytest.raises(RuntimeError)): |
| 118 | + args = [] |
| 119 | + args.extend(["--kv_cache_free_gpu_memory_fraction",
| 120 | + "0.2"])  # for co-existence with other servers
| 121 | + args.extend(["--max_num_tokens", str(max_num_tokens)])
| 122 | + if import_oot_code:
| 123 | + args.extend(["--custom_module_dirs", str(oot_path)])
| 124 | + with RemoteOpenAIServer(str(model_dir), args) as remote_server:
| 125 | + client = remote_server.get_client()
| 126 | + result = client.completions.create(
| 127 | + model="model_name",
| 128 | + prompt=prompts,
| 129 | + max_tokens=sampling_params.max_tokens,
| 130 | + temperature=0.0,
| 131 | + )
63 | 132 |
64 | | - for output, ref in zip(outputs, references): |
65 | | - assert similar(output.outputs[0].text, ref) |
| 133 | + for choice, ref in zip(result.choices, references): |
| 134 | + assert similar(choice.text, ref) |
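
A note on the expectation pattern used in both tests: `nullcontext()` stands in for "no exception expected", while `pytest.raises(...)` yields an `ExceptionInfo` that can be inspected after the block. A minimal standalone sketch of the idiom (illustrative names only, not part of this test suite):

    from contextlib import nullcontext

    import pytest


    @pytest.mark.parametrize("should_fail", [False, True])
    def test_conditional_expectation(should_fail: bool):
        # pytest.raises yields an ExceptionInfo; nullcontext yields None.
        expectation = pytest.raises(ZeroDivisionError) if should_fail else nullcontext()
        with expectation as excinfo:
            _ = 1 / (0 if should_fail else 1)
        if should_fail:
            assert excinfo is not None
            assert excinfo.type is ZeroDivisionError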