From b996a17d1dee80500b72da77724466d88a7f2783 Mon Sep 17 00:00:00 2001 From: Matheus Richard Date: Fri, 30 Aug 2024 13:05:48 -0300 Subject: [PATCH] Update OpenAIWriter - Update prompt - Use newer LLM model --- lib/gold_miner/blog_post/open_ai_writer.rb | 5 +++-- spec/gold_miner/blog_post/open_ai_writer_spec.rb | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/gold_miner/blog_post/open_ai_writer.rb b/lib/gold_miner/blog_post/open_ai_writer.rb index cf37ee2..51946d5 100644 --- a/lib/gold_miner/blog_post/open_ai_writer.rb +++ b/lib/gold_miner/blog_post/open_ai_writer.rb @@ -30,7 +30,8 @@ def give_title_to(gold_nugget) def summarize(gold_nugget) summary = ask_openai <<~PROMPT - Summarize the following markdown message without removing the author's blog link. Return the summary as markdown. + Summarize the following markdown message without removing the author's blog link. + Keep code examples and links, if any. Return the summary as markdown. Message: #{gold_nugget.as_conversation} @@ -48,7 +49,7 @@ def summarize(gold_nugget) def ask_openai(prompt) response = @openai_client.chat( parameters: { - model: "gpt-3.5-turbo", + model: "gpt-4o-mini", messages: [{role: "user", content: prompt.strip}], temperature: 0 } diff --git a/spec/gold_miner/blog_post/open_ai_writer_spec.rb b/spec/gold_miner/blog_post/open_ai_writer_spec.rb index c6ce98e..55abf2a 100644 --- a/spec/gold_miner/blog_post/open_ai_writer_spec.rb +++ b/spec/gold_miner/blog_post/open_ai_writer_spec.rb @@ -206,7 +206,7 @@ request = stub_open_ai_request( token: token, prompt: - "Summarize the following markdown message without removing the author's blog link. Return the summary as markdown.\n\nMessage:\n#{gold_nugget.as_conversation}", + "Summarize the following markdown message without removing the author's blog link.\nKeep code examples and links, if any. Return the summary as markdown.\n\nMessage:\n#{gold_nugget.as_conversation}", response_status: 200, response_body: { "choices" => [{"message" => {"role" => "assistant", "content" => open_ai_summary}}] @@ -231,7 +231,7 @@ request = stub_open_ai_error( token: token, prompt: - "Summarize the following markdown message without removing the author's blog link. Return the summary as markdown.\n\nMessage:\n#{gold_nugget.as_conversation}", + "Summarize the following markdown message without removing the author's blog link.\nKeep code examples and links, if any. Return the summary as markdown.\n\nMessage:\n#{gold_nugget.as_conversation}", response_error: open_ai_error ) writer = described_class.new(open_ai_api_token: token, fallback_writer: stub_fallback_writer) @@ -248,7 +248,7 @@ request = stub_open_ai_error( token: token, prompt: - "Summarize the following markdown message without removing the author's blog link. Return the summary as markdown.\n\nMessage:\n#{gold_nugget.as_conversation}", + "Summarize the following markdown message without removing the author's blog link.\nKeep code examples and links, if any. Return the summary as markdown.\n\nMessage:\n#{gold_nugget.as_conversation}", response_error: "Some error" ) fallback_summary = "[TODO]" @@ -285,7 +285,7 @@ def stub_open_ai_error(token:, prompt:, response_error:) def stub_open_ai_request(token:, prompt:, response_body:, response_status:) stub_request(:post, "https://api.openai.com/v1/chat/completions") .with( - body: %({"model":"gpt-3.5-turbo","messages":[{"role":"user","content":#{prompt.strip.dump}}],"temperature":0}), + body: %({"model":"gpt-4o-mini","messages":[{"role":"user","content":#{prompt.strip.dump}}],"temperature":0}), headers: { "Accept" => "*/*", "Accept-Encoding" => "gzip;q=1.0,deflate;q=0.6,identity;q=0.3",