Skip to content

Commit e28d80a

Browse files
committed
Add model freedom and ollama support
1 parent 8dc85b4 commit e28d80a

File tree

5 files changed

+215
-12
lines changed

5 files changed

+215
-12
lines changed

docs/source/gemini.png

5.76 KB
Loading

docs/source/magics.rst

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,29 @@ Here are the magics available in xeus-cpp.
1212
%%xassist
1313
========================
1414

15-
Leverage the large language models to assist in your development process. Currently supported models are Gemini - gemini-1.5-flash, OpenAI - gpt-3.5-turbo-16k.
15+
Leverage the large language models to assist in your development process. Currently supported models are Gemini, OpenAI, Ollama.
1616

17-
- Save the api key
17+
- Save the api key (for OpenAI and Gemini)
1818

1919
.. code::
2020
2121
%%xassist model --save-key
2222
key
2323
24+
- Save the model
25+
26+
- Set the response url (for Ollama)
27+
28+
.. code::
29+
30+
%%xassist model --set-url
31+
url
32+
33+
.. code::
34+
35+
%%xassist model --save-model
36+
model_name
37+
2438
- Use the model
2539

2640
.. code::
@@ -33,9 +47,10 @@ Leverage the large language models to assist in your development process. Curren
3347
.. code::
3448
3549
%%xassist model --refresh
50+
3651
37-
- Example
52+
- Examples
3853

3954
.. image:: gemini.png
4055

41-
A new prompt is sent to the model everytime and the functionality to use previous context will be added soon.
56+
.. image:: ollama.png

docs/source/ollama.png

11.3 KB
Loading

src/xmagics/xassist.cpp

Lines changed: 161 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,80 @@ namespace xcpp
6060
}
6161
};
6262

63+
/// Persists the user's chosen model name per vendor ("gemini", "openai",
/// "ollama") in a small text file named "<vendor>_model.txt" in the CWD.
class model_manager
{
public:

    /// Write `model_name` to "<model>_model.txt", overwriting any previous choice.
    /// Reports success on stdout and failure on stderr.
    static void save_model(const std::string& model, const std::string& model_name)
    {
        const std::string path = model + "_model.txt";
        std::ofstream out(path);
        if (!out)
        {
            std::cerr << "Failed to open file for writing model for model " << model << std::endl;
            return;
        }
        out << model_name;
        out.close();
        std::cout << "Model saved for model " << model << std::endl;
    }

    /// Read the first line of "<model>_model.txt".
    /// Returns "" (and logs to stderr) when the file cannot be opened.
    static std::string load_model(const std::string& model)
    {
        const std::string path = model + "_model.txt";
        std::ifstream in(path);
        if (!in)
        {
            std::cerr << "Failed to open file for reading model for model " << model << std::endl;
            return "";
        }
        std::string stored_name;
        std::getline(in, stored_name);
        in.close();
        return stored_name;
    }
};
99+
100+
/// Persists a per-vendor endpoint URL (used by Ollama, which has no fixed
/// hosted endpoint) in a text file named "<vendor>_url.txt" in the CWD.
class url_manager
{
public:

    /// Write `url` to "<model>_url.txt", overwriting any previous value.
    /// Reports success on stdout and failure on stderr.
    static void save_url(const std::string& model, const std::string& url)
    {
        const std::string path = model + "_url.txt";
        std::ofstream out(path);
        if (!out)
        {
            std::cerr << "Failed to open file for writing URL for model " << model << std::endl;
            return;
        }
        out << url;
        out.close();
        std::cout << "URL saved for model " << model << std::endl;
    }

    /// Read the first line of "<model>_url.txt".
    /// Returns "" (and logs to stderr) when the file cannot be opened.
    static std::string load_url(const std::string& model)
    {
        const std::string path = model + "_url.txt";
        std::ifstream in(path);
        if (!in)
        {
            std::cerr << "Failed to open file for reading URL for model " << model << std::endl;
            return "";
        }
        std::string stored_url;
        std::getline(in, stored_url);
        in.close();
        return stored_url;
    }
};
136+
63137
class chat_history
64138
{
65139
public:
@@ -209,8 +283,16 @@ namespace xcpp
209283
{
210284
curl_helper curl_helper;
211285
const std::string chat_message = xcpp::chat_history::chat("gemini", "user", cell);
212-
const std::string url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key="
213-
+ key;
286+
const std::string model = xcpp::model_manager::load_model("gemini");
287+
288+
if (model.empty())
289+
{
290+
std::cerr << "Model not found." << std::endl;
291+
return "";
292+
}
293+
294+
const std::string url = "https://generativelanguage.googleapis.com/v1beta/models/" + model
295+
+ ":generateContent?key=" + key;
214296
const std::string post_data = R"({"contents": [ )" + chat_message + R"(]})";
215297

216298
std::string response = curl_helper.perform_request(url, post_data);
@@ -231,13 +313,64 @@ namespace xcpp
231313
return j["candidates"][0]["content"]["parts"][0]["text"];
232314
}
233315

316+
std::string ollama(const std::string& cell)
317+
{
318+
curl_helper curl_helper;
319+
const std::string url = xcpp::url_manager::load_url("ollama");
320+
const std::string chat_message = xcpp::chat_history::chat("ollama", "user", cell);
321+
const std::string model = xcpp::model_manager::load_model("ollama");
322+
323+
if (model.empty())
324+
{
325+
std::cerr << "Model not found." << std::endl;
326+
return "";
327+
}
328+
329+
if (url.empty())
330+
{
331+
std::cerr << "URL not found." << std::endl;
332+
return "";
333+
}
334+
335+
const std::string post_data = R"({
336+
"model": ")" + model
337+
+ R"(",
338+
"messages": [)" + chat_message
339+
+ R"(],
340+
"stream": false
341+
})";
342+
343+
std::string response = curl_helper.perform_request(url, post_data);
344+
345+
json j = json::parse(response);
346+
347+
if (j.find("error") != j.end())
348+
{
349+
std::cerr << "Error: " << j["error"]["message"] << std::endl;
350+
return "";
351+
}
352+
353+
const std::string chat = xcpp::chat_history::chat("ollama", "assistant", j["message"]["content"]);
354+
355+
return j["message"]["content"];
356+
}
357+
234358
std::string openai(const std::string& cell, const std::string& key)
235359
{
236360
curl_helper curl_helper;
237361
const std::string url = "https://api.openai.com/v1/chat/completions";
238362
const std::string chat_message = xcpp::chat_history::chat("openai", "user", cell);
363+
const std::string model = xcpp::model_manager::load_model("openai");
364+
365+
if (model.empty())
366+
{
367+
std::cerr << "Model not found." << std::endl;
368+
return "";
369+
}
370+
239371
const std::string post_data = R"({
240-
"model": "gpt-3.5-turbo-16k",
372+
"model": [)" + model
373+
+ R"(],
241374
"messages": [)" + chat_message
242375
+ R"(],
243376
"temperature": 0.7
@@ -273,7 +406,7 @@ namespace xcpp
273406
std::istream_iterator<std::string>()
274407
);
275408

276-
std::vector<std::string> models = {"gemini", "openai"};
409+
std::vector<std::string> models = {"gemini", "openai", "ollama"};
277410
std::string model = tokens[1];
278411

279412
if (std::find(models.begin(), models.end(), model) == models.end())
@@ -295,13 +428,29 @@ namespace xcpp
295428
xcpp::chat_history::refresh(model);
296429
return;
297430
}
431+
432+
if (tokens[2] == "--save-model")
433+
{
434+
xcpp::model_manager::save_model(model, cell);
435+
return;
436+
}
437+
438+
if (tokens[2] == "--set-url" && model == "ollama")
439+
{
440+
xcpp::url_manager::save_url(model, cell);
441+
return;
442+
}
298443
}
299444

300-
std::string key = xcpp::api_key_manager::load_api_key(model);
301-
if (key.empty())
445+
std::string key;
446+
if (model != "ollama")
302447
{
303-
std::cerr << "API key for model " << model << " is not available." << std::endl;
304-
return;
448+
key = xcpp::api_key_manager::load_api_key(model);
449+
if (key.empty())
450+
{
451+
std::cerr << "API key for model " << model << " is not available." << std::endl;
452+
return;
453+
}
305454
}
306455

307456
std::string response;
@@ -313,6 +462,10 @@ namespace xcpp
313462
{
314463
response = openai(cell, key);
315464
}
465+
else if (model == "ollama")
466+
{
467+
response = ollama(cell);
468+
}
316469

317470
std::cout << response;
318471
}

test/test_interpreter.cpp

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -962,4 +962,39 @@ TEST_SUITE("xassist"){
962962
std::remove("openai_api_key.txt");
963963
}
964964

965+
TEST_CASE("ollama"){
    xcpp::xassist assist;

    // --set-url should persist the cell contents to ollama_url.txt.
    std::string line = "%%xassist ollama --set-url";
    std::string cell = "1234";

    assist(line, cell);

    std::ifstream infile("ollama_url.txt");
    std::string content;
    std::getline(infile, content);

    REQUIRE(content == "1234");
    infile.close();

    // --save-model should persist the cell contents to ollama_model.txt.
    line = "%%xassist ollama --save-model";
    cell = "1234";

    assist(line, cell);

    std::ifstream infile_model("ollama_model.txt");
    std::string content_model;
    std::getline(infile_model, content_model);

    REQUIRE(content_model == "1234");
    infile_model.close();

    // Invoking a keyed vendor with no key saved must emit an error on stderr.
    StreamRedirectRAII redirect(std::cerr);

    assist("%%xassist openai", "hello");

    REQUIRE(!redirect.getCaptured().empty());

    // Remove every artifact this test may have touched — the original only
    // removed openai_api_key.txt, leaking ollama_url.txt/ollama_model.txt
    // into subsequent test cases.
    std::remove("openai_api_key.txt");
    std::remove("ollama_url.txt");
    std::remove("ollama_model.txt");
}
999+
9651000
}

0 commit comments

Comments
 (0)