mellea.backends.openai
mellea.backends.openai._server_type(url: str)
class mellea.backends.openai._ServerType()
class mellea.backends.openai.OpenAIBackend(model_id: str | ModelIdentifier = model_ids.IBM_GRANITE_3_3_8B, formatter: Formatter | None = None, base_url: str | None = None, model_options: dict | None = None, default_to_constraint_checking_alora: bool = True, api_key: str | None = None, **kwargs)
model_id
: A generic model identifier or OpenAI-compatible string. Defaults to model_ids.IBM_GRANITE_3_3_8B.
formatter
: A custom formatter for the backend. If None, defaults to TemplateFormatter.
base_url
: Base URL for the LLM API. Defaults to None.
model_options
: Generation options to pass to the LLM. Defaults to None.
default_to_constraint_checking_alora
: If set to False, ALoras will be deactivated. This is primarily for performance benchmarking and debugging.
api_key
: API key for generation. Defaults to None.
mellea.backends.openai.OpenAIBackend.filter_openai_client_kwargs(**kwargs)
mellea.backends.openai.OpenAIBackend.filter_chat_completions_kwargs(model_options: dict)
mellea.backends.openai.OpenAIBackend.filter_completions_kwargs(model_options: dict)
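A minimal construction sketch (the base_url, api_key, and generation options below are placeholders; model_ids is assumed importable from mellea.backends):

```python
from mellea.backends import model_ids
from mellea.backends.openai import OpenAIBackend

# Any OpenAI-compatible server works (OpenAI, vLLM, Ollama, ...).
# base_url and api_key here are illustrative placeholders.
backend = OpenAIBackend(
    model_id=model_ids.IBM_GRANITE_3_3_8B,
    base_url="http://localhost:8000/v1",
    api_key="EMPTY",
    model_options={"temperature": 0.0, "max_tokens": 512},
)
```

Extra **kwargs are screened by filter_openai_client_kwargs before being passed to the underlying OpenAI client; the two filter_*_kwargs helpers above similarly screen model_options down to what the chat-completions and completions endpoints each accept.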
mellea.backends.openai.OpenAIBackend._simplify_and_merge(model_options: dict[str, Any] | None, is_chat_context: bool)
model_options
: The model_options for this call.
mellea.backends.openai.OpenAIBackend._make_backend_specific_and_remove(model_options: dict[str, Any], is_chat_context: bool)
model_options
: The model_options for this call.
mellea.backends.openai.OpenAIBackend.generate_from_context(action: Component | CBlock, ctx: Context, format: type[BaseModelSubclass] | None = None, model_options: dict | None = None, generate_logs: list[GenerateLog] | None = None, tool_calls: bool = False)
See generate_from_chat_context.
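A hedged usage sketch (CBlock and the Context implementation are imported under assumed paths; per-call model_options are merged over the constructor's defaults by _simplify_and_merge, with per-call keys winning):

```python
# Assumed import paths; they may differ across mellea versions.
from mellea.stdlib.base import CBlock, LinearContext

ctx = LinearContext()  # assumption: a chat-style Context implementation

# Keys given here override the constructor's model_options for this call only.
result = backend.generate_from_context(
    CBlock("Write a haiku about compilers."),
    ctx,
    model_options={"max_tokens": 64},
)
print(result.value)  # assumption: generation returns a thunk exposing .value
```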
mellea.backends.openai.OpenAIBackend.generate_from_chat_context(action: Component | CBlock, ctx: Context, format: type[BaseModelSubclass] | None = None, model_options: dict | None = None, generate_logs: list[GenerateLog] | None = None, tool_calls: bool = False)
Generates a new completion from the provided Context using this backend's Formatter.
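When format is given, the backend requests output conforming to the Pydantic schema (structured output). A sketch reusing backend and ctx from above; how the JSON is surfaced on the result is an assumption:

```python
from pydantic import BaseModel

class Answer(BaseModel):
    text: str

result = backend.generate_from_chat_context(
    CBlock("Summarize Hamlet in one sentence."),
    ctx,
    format=Answer,                       # constrain decoding to this schema
    model_options={"temperature": 0.2},
)
answer = Answer.model_validate_json(result.value)  # assumption: .value holds the JSON string
```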
mellea.backends.openai.OpenAIBackend._generate_from_chat_context_alora(action: Component | CBlock, ctx: Context, format: type[BaseModelSubclass] | None = None, model_options: dict | None = None, generate_logs: list[GenerateLog] | None = None)
mellea.backends.openai.OpenAIBackend._generate_from_chat_context_standard(action: Component | CBlock, ctx: Context, format: type[BaseModelSubclass] | None = None, model_options: dict | None = None, generate_logs: list[GenerateLog] | None = None, tool_calls: bool = False)
mellea.backends.openai.OpenAIBackend._generate_from_raw(actions: list[Component | CBlock], format: type[BaseModelSubclass] | None = None, model_options: dict | None = None, generate_logs: list[GenerateLog] | None = None)
mellea.backends.openai.OpenAIBackend._extract_model_tool_requests(tools: dict[str, Callable], chat_response: ChatCompletion)
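With tool_calls=True, tools discovered on the action/context are advertised to the model, and _extract_model_tool_requests parses any tool calls out of the ChatCompletion. A consumer-side sketch; how tools get attached to the action, and the shape of the parsed result, are assumptions:

```python
def get_weather(city: str) -> str:
    """Toy tool; the backend surfaces its name and signature to the model."""
    return f"Sunny in {city}"

result = backend.generate_from_chat_context(
    CBlock("What's the weather in Oslo? Use a tool if helpful."),
    ctx,
    tool_calls=True,
)

# assumption: parsed requests land on the output as a name -> call mapping
if result.tool_calls:
    for name, call in result.tool_calls.items():
        print(name, call.call_func())
```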
mellea.backends.openai.OpenAIBackend.add_alora(alora: 'OpenAIAlora')
alora
: OpenAIAlora
: The ALora adapter to register with this backend.
mellea.backends.openai.OpenAIBackend.get_alora(alora_name: str)
mellea.backends.openai.OpenAIBackend.get_aloras()
mellea.backends.openai.OpenAIBackend.apply_chat_template(chat: list[dict[str, str]])
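apply_chat_template renders an OpenAI-style message list into a single prompt string (e.g., for completions-style generation via _generate_from_raw). A sketch:

```python
chat = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Name three prime numbers."},
]
prompt = backend.apply_chat_template(chat)
```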
class mellea.backends.openai.OpenAIAlora(name: str, path: str, generation_prompt: str, backend: OpenAIBackend)
name
: str
: An arbitrary name/label to assign to an ALora. This is independent of the ALora's (Hugging Face) model id.
path
: str
: A local path to the ALora's weights, or a Hugging Face model_id for an ALora.
generation_prompt
: str
: A prompt used to “activate” the ALora. This string goes between the pre-activation context and the ALora generate call. It must be provided by the entity that trained the ALora.
backend
: OpenAIBackend
: Maintained as a pointer to the backend to which this ALora is attached.
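Putting the ALora pieces together, a sketch of defining and registering an adapter (the name, path, and generation prompt are placeholders; a real generation_prompt comes from whoever trained the ALora):

```python
from mellea.backends.openai import OpenAIAlora

alora = OpenAIAlora(
    name="constraint_checker",          # arbitrary local label
    path="my-org/my-constraint-alora",  # placeholder HF id or local path
    generation_prompt="<placeholder activation prompt>",
    backend=backend,
)
backend.add_alora(alora)

assert backend.get_alora("constraint_checker") is alora
```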