
Commit 70a3cb9

fix None variable defs and trace ingest

Change-Id: Iab054fc781aed268418a2aa59528900fdccd10a5
fix None variable defs and trace ingest
Change-Id: I08bfbcb39557821c6d88cf59883b38c5a7677269
1 parent a7628c1 commit 70a3cb9

File tree

3 files changed: +18, -17 lines

cozeloop/internal/prompt/prompt.py

Lines changed: 1 addition & 1 deletion
@@ -162,7 +162,7 @@ def _format_normal_messages(
         variables: Dict[str, PromptVariable]
 ) -> List[Message]:
     results = []
-    variable_def_map = {var_def.key: var_def for var_def in variable_defs if var_def}
+    variable_def_map = {var_def.key: var_def for var_def in variable_defs if var_def} if variable_defs else {}
 
     for message in messages:
         if message is None:
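Why the guard matters: when a prompt declares no variables, variable_defs arrives as None, and iterating None in the dict comprehension raises a TypeError before the per-item `if var_def` filter ever runs. A minimal standalone sketch of the before/after behavior (SimpleNamespace stands in for the SDK's variable-def type, which this diff doesn't show):

from types import SimpleNamespace

def build_map(variable_defs):
    # Post-fix behavior: None (or empty) variable_defs yields an empty map
    # instead of raising "TypeError: 'NoneType' object is not iterable".
    return {v.key: v for v in variable_defs if v} if variable_defs else {}

defs = [SimpleNamespace(key="name"), None, SimpleNamespace(key="topic")]
assert set(build_map(defs)) == {"name", "topic"}  # None entries still skipped
assert build_map(None) == {}                      # None input no longer crashes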

cozeloop/internal/trace/span.py

Lines changed: 2 additions & 1 deletion
@@ -150,7 +150,8 @@ def set_input(self, input_data):
         if isinstance(input_data, ModelInput):
             m_content = input_data
             for message in input_data.messages:
-                message_parts.extend(message.parts)
+                if message.parts:
+                    message_parts.extend(message.parts)
 
         is_multi_modality = self.parse_model_message_parts(message_parts)
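This is the same class of bug: message.parts is evidently None for text-only messages, and list.extend(None) raises a TypeError during trace ingestion. A standalone sketch of the guarded accumulation (plain dicts stand in for the SDK's message objects):

message_parts = []
messages = [
    {"parts": ["image_url_1", "file_1"]},  # multimodal message with parts
    {"parts": None},                       # text-only message, parts unset
]
for message in messages:
    if message["parts"]:  # skip None (and empty) parts lists
        message_parts.extend(message["parts"])

assert message_parts == ["image_url_1", "file_1"]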

examples/prompt/prompt_hub.py

Lines changed: 15 additions & 15 deletions
@@ -11,6 +11,19 @@
 from cozeloop.spec.tracespec import CALL_OPTIONS, ModelCallOption, ModelMessage, ModelInput
 
 
+def convert_model_input(messages: List[Message]) -> ModelInput:
+    model_messages = []
+    for message in messages:
+        model_messages.append(ModelMessage(
+            role=str(message.role),
+            content=message.content if message.content is not None else ""
+        ))
+
+    return ModelInput(
+        messages=model_messages
+    )
+
+
 class LLMRunner:
     def __init__(self, client):
         self.client = client
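The helper itself is unchanged; it was moved from the bottom of the example to above LLMRunner, presumably so it is defined before its first use when the script runs top to bottom. Its job is a small mapping: each SDK Message becomes a trace-spec ModelMessage, with None content coerced to an empty string so the span never serializes a None. A rough standalone sketch of that mapping (SimpleNamespace stands in for the SDK's Message type, whose import isn't shown in this diff):

from types import SimpleNamespace

messages = [
    SimpleNamespace(role="system", content="You are a helpful assistant."),
    SimpleNamespace(role="user", content=None),  # e.g. content carried in parts
]

model_messages = [
    {"role": str(m.role), "content": m.content if m.content is not None else ""}
    for m in messages
]
assert model_messages[1]["content"] == ""  # None content coerced, not propagated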
@@ -49,14 +62,14 @@ def llm_call(self, input_data):
             span.set_output_tokens(output_token)
             # set tag key: `model_name`, e.g., gpt-4-1106-preview, etc.
             span.set_model_name("gpt-4-1106-preview")
-            span.set_tags(CALL_OPTIONS, ModelCallOption(
+            span.set_tags({CALL_OPTIONS: ModelCallOption(
                 temperature=0.5,
                 top_p=0.5,
                 top_k=10,
                 presence_penalty=0.5,
                 frequency_penalty=0.5,
                 max_tokens=1024,
-            ))
+            )})
 
             return None
         except Exception as e:
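The set_tags change is an API-shape fix: as the corrected call shows, set_tags takes a single mapping of tag keys to values rather than a separate key and value argument, so the old two-argument call would fail at runtime. A stand-in sketch of the call shape (the function body and the tag values are made up for illustration):

def set_tags(tags: dict) -> None:
    # stand-in with the post-fix signature: one positional dict argument
    for key, value in tags.items():
        print(f"tag {key!r} -> {value!r}")

# old shape: set_tags("call_options", option)  -> TypeError: too many arguments
set_tags({"call_options": {"temperature": 0.5, "max_tokens": 1024}})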
@@ -124,16 +137,3 @@ def llm_call(self, input_data):
     # Note that flush will block and wait for the report to complete, and it may cause frequent reporting,
     # affecting performance.
     client.flush()
-
-
-def convert_model_input(messages: List[Message]) -> ModelInput:
-    model_messages = []
-    for message in messages:
-        model_messages.append(ModelMessage(
-            role=str(message.role),
-            content=message.content if message.content is not None else ""
-        ))
-
-    return ModelInput(
-        messages=model_messages
-    )
