from transformers import pipeline


class LlmWrapper:
    """Thin wrapper around a Hugging Face text-generation pipeline."""

    def __init__(self, model_name="Qwen/Qwen3-0.6B", max_new_tokens=256):
        self.model_name = model_name
        self.pipe = pipeline("text-generation", model=model_name)
        self.max_tokens = max_new_tokens

    def summarize(self, text: str, prompt_template=None) -> str:
        # Default to a lightweight summarization instruction;
        # a supplied prompt_template is used verbatim in place of it.
        prompt = (
            prompt_template or
            f"Summarize the following content briefly:\n\n{text.strip()}\n\nSummary:"
        )

        messages = [{"role": "user", "content": prompt}]
        try:
            outputs = self.pipe(messages, max_new_tokens=self.max_tokens)
            # With chat-style input, "generated_text" is the full message list;
            # the assistant reply is the last entry.
            return outputs[0]["generated_text"][-1]["content"].strip()
        except Exception as e:
            return f"[LLM ERROR]: {e}"
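

# Example usage: a minimal sketch, assuming the class above is run as a script.
# Note that the first call downloads the Qwen/Qwen3-0.6B weights from the
# Hugging Face Hub, so it may take a while on a fresh environment.
if __name__ == "__main__":
    wrapper = LlmWrapper()
    print(wrapper.summarize(
        "Transformers provides thousands of pretrained models for text, "
        "vision, and audio tasks, sharing a common pipeline API."
    ))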