@@ -116,28 +116,10 @@ class LLMInterface {
 
   /**
    * @brief Generate completion from a prompt
-   * @param prompt The prompt to generate completion from
-   * @return The LLM response
-   */
-  virtual LLMResponse complete(const String& prompt);
-
-  /**
-   * @brief Generate completion from a list of messages
-   * @param messages The messages to generate completion from
-   * @return The LLM response
-   */
-  virtual LLMResponse complete(const std::vector<Message>& messages);
-
-  /**
-   * @brief Generate completion with available tools
-   * @param messages The messages to generate completion from
-   * @param tools_schema The tools schema to use
-   * @return The LLM response
+   * @param prompt The prompt
+   * @return The completion
    */
-  virtual LLMResponse completeWithTools(
-      const std::vector<Message>& messages,
-      const std::vector<JsonObject>& tools_schema
-  );
+  virtual LLMResponse chat(const String& prompt) = 0;
 
   /**
    * @brief Generate completion from a list of messages
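
The net effect of this first hunk: the prompt-based complete(), the message-list complete(), and completeWithTools() are removed, and a single pure-virtual chat(const String&) takes over the prompt case (a message-list variant survives in the trailing context). A minimal sketch of a provider conforming to the new contract; the EchoLLM name, the header path, and the LLMResponse::content field are assumptions, not part of this commit:

    #include "llm_interface.h"  // assumed header name for LLMInterface

    // Hypothetical provider; other pure-virtual members of LLMInterface
    // (streaming, async) are elided here for brevity.
    class EchoLLM : public LLMInterface {
    public:
        // chat() is now pure virtual, so every provider must implement it.
        LLMResponse chat(const String& prompt) override {
            LLMResponse response;
            response.content = prompt;  // assumed field; a real provider would call its backend
            return response;
        }
    };
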
@@ -167,20 +149,6 @@ class LLMInterface {
      std::function<void(const String&, bool)> callback
   ) = 0;
 
-  /**
-   * @brief Async complete from a prompt
-   * @param prompt The prompt to generate completion from
-   * @return The LLM response
-   */
-  virtual Task<LLMResponse> completeAsync(const String& prompt);
-
-  /**
-   * @brief Async complete from a list of messages
-   * @param messages The messages to generate completion from
-   * @return The LLM response
-   */
-  virtual Task<LLMResponse> completeAsync(const std::vector<Message>& messages);
-
   /**
    * @brief Async chat from a list of messages
    * @param messages The messages to generate completion from
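
With both completeAsync overloads gone, coroutine callers route through the remaining async chat entry point instead. A sketch under stated assumptions: the surviving method is taken to be named chatAsync and Task<T> to be co_await-able, neither of which is confirmed by the visible lines:

    #include <vector>

    // Hypothetical caller; chatAsync, the Message initializer, and the
    // awaitability of Task<T> are assumptions.
    Task<void> askOnce(LLMInterface& llm) {
        std::vector<Message> messages;
        messages.push_back(Message{});  // populate role/content as the project defines them
        LLMResponse response = co_await llm.chatAsync(messages);
        // ... consume response ...
        co_return;
    }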