From: Andrew Hyatt
Subject: [elpa] externals/llm 636014bf64 08/34: Make all remaining code async-friendly
Date: Sat, 16 Sep 2023 01:32:47 -0400 (EDT)

branch: externals/llm
commit 636014bf64a91d3ddbe3ba14e585e332f2b9820a
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>

    Make all remaining code async-friendly
    
    This finalizes (I hope) async changes to both openai and vertex providers, as
    well as the tester.
---
 llm-openai.el | 22 +++++++++++-----------
 llm-tester.el | 39 ++++++++++++++++++++-------------------
 llm-vertex.el | 27 ++++++++++++++++-----------
 3 files changed, 47 insertions(+), 41 deletions(-)
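
For orientation, the async convention this commit converges on passes a
success callback and an error callback instead of blocking on a return
value. A minimal sketch of a call site under that convention (the
`my-show-chat-response' wrapper is hypothetical; `llm-chat-response-async'
is the generic function added in the diffs below):

    ;; Hypothetical wrapper illustrating the callback convention.
    (defun my-show-chat-response (provider prompt)
      "Send PROMPT to PROVIDER asynchronously and display the reply."
      (llm-chat-response-async
       provider prompt
       ;; Success callback: receives the LLM's reply as a string.
       (lambda (response) (message "LLM reply: %s" response))
       ;; Error callback: receives an error type and a message.
       (lambda (type msg) (message "LLM error (%s): %s" type msg))))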

diff --git a/llm-openai.el b/llm-openai.el
index 3bc8a06f17..45dee5fc4d 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -69,11 +69,13 @@ will use a reasonable default."
                                                      (assoc-default 'type (cdar data))
                                                      (assoc-default 'message (cdar data))))))))
 
-(defun llm-openai--chat-response (prompt response-callback error-callback &optional return-json-spec)
+(defun llm-openai--chat-response (provider prompt response-callback error-callback &optional return-json-spec)
   "Main method to send a PROMPT as a chat prompt to Open AI.
 RETURN-JSON-SPEC, if specified, is a JSON spec to return from the
 Open AI API.
 
+PROVIDER is a `llm-openai' struct which holds the key and other options.
+
 RESPONSE-CALLBACK is a function to call with the LLM response.
 
 ERROR-CALLBACK is called if there is an error, with the error
@@ -121,22 +123,20 @@ signal and message."
                              ("Content-Type" . "application/json"))
                   :data (json-encode request-alist)
                   :parser 'json-read
+                  :success (cl-function
+                            (lambda (&key data &allow-other-keys)
+                              (let ((result (cdr (assoc 'content (cdr (assoc 'message (aref (cdr (assoc 'choices data)) 0))))))
+                                    (func-result (cdr (assoc 'arguments (cdr (assoc 'function_call (cdr (assoc 'message (aref (cdr (assoc 'choices data)) 0)))))))))
+                                (funcall response-callback (or func-result result)))))
                  :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
                                        (funcall error-callback
                                                 (format "Problem calling Open AI: %s, type: %s message: %s"
                                                         (cdr error-thrown)
                                                         (assoc-default 'type (cdar data))
-                                                         (assoc-default 'message (cdar data)))))))))
-      (let ((result (cdr (assoc 'content (cdr (assoc 'message (aref (cdr (assoc 'choices (request-response-data resp))) 0))))))
-            (func-result (cdr (assoc 'arguments (cdr (assoc 'function_call (cdr (assoc 'message (aref (cdr (assoc 'choices (request-response-data resp))) 0)))))))))
-        (funcall result-callback (or func-result result))))))
-
-(cl-defmethod llm-chat-response ((provider llm-openai) prompt)
-  (llm-openai--chat-response prompt nil))
-
-(cl-defmethod llm-chat-structured-response ((provider llm-openai) prompt spec)
-  (llm-openai--chat-response prompt spec))
+                                                         (assoc-default 'message (cdar data))))))))))))
 
+(cl-defmethod llm-chat-response-async ((provider llm-openai) prompt response-callback error-callback)
+  (llm-openai--chat-response provider prompt response-callback error-callback))
 
 (provide 'llm-openai)
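
With the hunks above, the public Open AI entry point is the async generic.
A minimal sketch of exercising it, assuming an API key is at hand (the
`make-llm-openai' constructor and the prompt structs come from the
package; the key string is a placeholder):

    (require 'llm-openai)

    (llm-chat-response-async
     (make-llm-openai :key "OPENAI-KEY-PLACEHOLDER")
     (make-llm-chat-prompt
      :interactions (list (make-llm-chat-prompt-interaction
                           :role 'user
                           :content "Say hello.")))
     ;; Success: the reply text.
     (lambda (response) (message "Response: %s" response))
     ;; Failure: error type and human-readable message.
     (lambda (type msg) (message "Error %s: %s" type msg)))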
 
diff --git a/llm-tester.el b/llm-tester.el
index 089e5cd5de..78a578d8a1 100644
--- a/llm-tester.el
+++ b/llm-tester.el
@@ -53,25 +53,26 @@
 (defun llm-tester-chat (provider)
   "Test that PROVIDER can interact with the LLM chat."
   (message "Testing provider %s for chat" (type-of provider))
-  (llm-chat-response provider
-                     (make-llm-chat-prompt
-                      :interactions (list
-                                     (make-llm-chat-prompt-interaction
-                                      :role 'user
-                                      :content "Tell me a random cool feature of emacs."))
-                      :context "You must answer all questions as if you were the butler Jeeves from Jeeves and Wooster.  Start all interactions with the phrase, 'Very good, sir.'"
-                      :examples '(("Tell me the capital of France." . "Very good, sir.  The capital of France is Paris, which I expect you to be familiar with, since you were just there last week with your Aunt Agatha.")
-                                  ("Could you take me to my favorite place?" . "Very good, sir.  I believe you are referring to the Drone's Club, which I will take you to after you put on your evening attire."))
-                      :temperature 0.5
-                      :max-tokens 100)
-                     (lambda (response)
-                       (if response
-                           (if (> (length response) 0)
-                               (message "SUCCESS: Provider %s provided a response %s" (type-of provider) response)
-                             (message "ERROR: Provider %s returned an empty response" (type-of provider)))
-                         (message "ERROR: Provider %s did not return any response" (type-of provider))))
-                     (lambda (type message)
-                       (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message))))
+  (llm-chat-response-async
+   provider
+   (make-llm-chat-prompt
+    :interactions (list
+                   (make-llm-chat-prompt-interaction
+                    :role 'user
+                    :content "Tell me a random cool feature of emacs."))
+    :context "You must answer all questions as if you were the butler Jeeves from Jeeves and Wooster.  Start all interactions with the phrase, 'Very good, sir.'"
+    :examples '(("Tell me the capital of France." . "Very good, sir.  The capital of France is Paris, which I expect you to be familiar with, since you were just there last week with your Aunt Agatha.")
+                ("Could you take me to my favorite place?" . "Very good, sir.  I believe you are referring to the Drone's Club, which I will take you to after you put on your evening attire."))
+    :temperature 0.5
+    :max-tokens 100)
+   (lambda (response)
+     (if response
+         (if (> (length response) 0)
+             (message "SUCCESS: Provider %s provided a response %s" (type-of provider) response)
+           (message "ERROR: Provider %s returned an empty response" (type-of provider)))
+       (message "ERROR: Provider %s did not return any response" (type-of provider))))
+   (lambda (type message)
+     (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message))))
 
 (defun llm-tester-all (provider)
   "Test all llm functionality for PROVIDER."
diff --git a/llm-vertex.el b/llm-vertex.el
index 6bcc949079..cbbf165e18 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -69,7 +69,7 @@ KEY-GENTIME keeps track of when the key was generated, because the key must be r
       (setf (llm-vertex-key provider) result))
     (setf (llm-vertex-key-gentime provider) (current-time))))
 
-(cl-defmethod llm-embedding ((provider llm-vertex) string)
+(cl-defmethod llm-embedding-async ((provider llm-vertex) string vector-callback error-callback)
   (llm-vertex-refresh-key provider)
  (let ((resp (request (format "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
                               llm-vertex-gcloud-region
@@ -81,13 +81,16 @@ KEY-GENTIME keeps track of when the key was generated, because the key must be r
                            ("Content-Type" . "application/json"))
                :data (json-encode `(("instances" . [(("content" . ,string))])))
                 :parser 'json-read
+                :success (cl-function
+                          (lambda (&key data &allow-other-keys)
+                            (funcall vector-callback
+                                     (cdr (assoc 'values (cdr (assoc 'embeddings (aref (cdr (assoc 'predictions data)) 0))))))))
                :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
-                                      (error (format "Problem calling GCloud AI: %s"
-                                                     (cdr error-thrown)))))
-                :sync t)))
-    (cdr (assoc 'values (cdr (assoc 'embeddings (aref (cdr (assoc 'predictions (request-response-data resp))) 0)))))))
+                                      (funcall error-callback
+                                               (error (format "Problem calling GCloud AI: %s"
+                                                     (cdr error-thrown)))))))))))
 
-(cl-defmethod llm-chat-response ((provider llm-vertex) prompt)
+(cl-defmethod llm-chat-response-async ((provider llm-vertex) prompt response-callback error-callback)
   (llm-vertex-refresh-key provider)
   (let ((request-alist))
     (when (llm-chat-prompt-context prompt)
@@ -123,14 +126,16 @@ KEY-GENTIME keeps track of when the key was generated, because the key must be r
                                  ("Content-Type" . "application/json"))
                       :data (json-encode `(("instances" . [,request-alist])))
                       :parser 'json-read
+                      :success (cl-function (lambda (&key data &allow-other-keys)
+                                              (funcall response-callback
+                                                       (cdr (assoc 'content (aref (cdr (assoc 'candidates (aref (cdr (assoc 'predictions data)) 0))) 0))))))
                      :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
-                                          (error (format "Problem calling GCloud AI: %s, status: %s message: %s (%s)"
-                                                           (cdr error-thrown)
+                                            (funcall error-callback 'error
+                                                     (error (format "Problem calling GCloud AI: %s, status: %s message: %s (%s)"
+                                                                    'error(cdr error-thrown)
                                                           (assoc-default 'status (assoc-default 'error data))
                                                           (assoc-default 'message (assoc-default 'error data))
-                                                           data))))
-                      :sync t)))
-      (cdr (assoc 'content (aref (cdr (assoc 'candidates (aref (cdr (assoc 'predictions (request-response-data resp))) 0))) 0))))))
+                                                           data)))))))))))
 
 (provide 'llm-vertex)
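
Finally, a sketch of the new Vertex embedding entry point. The :project
slot name and its value are assumptions for illustration, and note that
per the hunk above the embedding error callback receives a single
argument in this revision:

    (require 'llm-vertex)

    (llm-embedding-async
     (make-llm-vertex :project "my-gcloud-project") ; placeholder project
     "The quick brown fox"
     ;; Vector callback: receives the embedding for the input string.
     (lambda (vec) (message "Got an embedding of %d values" (length vec)))
     ;; Error callback (single argument here).
     (lambda (err) (message "Embedding failed: %s" err)))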
 


