emacs-elpa-diffs
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[elpa] externals/llm 414d25a625 09/34: Removed various unused things, and format fixes


From: Andrew Hyatt
Subject: [elpa] externals/llm 414d25a625 09/34: Removed various unused things, and format fixes
Date: Sat, 16 Sep 2023 01:32:48 -0400 (EDT)

branch: externals/llm
commit 414d25a625201acc0f7b87f6fdb8eca2b48d5bc8
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>

    Removed various unused things, and format fixes
    
    This fixes all byte compile warnings, and notably fixes an incorrect error
    message formatting in the vertex provider.
---
 llm-openai.el |  4 ++--
 llm-vertex.el | 16 ++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/llm-openai.el b/llm-openai.el
index 45dee5fc4d..9478878322 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -117,7 +117,7 @@ signal and message."
             request-alist)
       (push '("function_call" . (("name" . "output"))) request-alist))
     
-    (let* ((resp (request "https://api.openai.com/v1/chat/completions"
+    (request "https://api.openai.com/v1/chat/completions"
                   :type "POST"
                   :headers `(("Authorization" . ,(format "Bearer %s" 
(llm-openai-key provider)))
                              ("Content-Type" . "application/json"))
@@ -133,7 +133,7 @@ signal and message."
                                                  (format "Problem calling Open 
AI: %s, type: %s message: %s"
                                                          (cdr error-thrown)
                                                          (assoc-default 'type 
(cdar data))
-                                                         (assoc-default 
'message (cdar data))))))))))))
+                                                         (assoc-default 
'message (cdar data)))))))))
 
 (cl-defmethod llm-chat-response-async ((provider llm-openai) prompt 
response-callback error-callback)
   (llm-openai--chat-response provider prompt response-callback error-callback))
diff --git a/llm-vertex.el b/llm-vertex.el
index cbbf165e18..41fd97d1e9 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -71,7 +71,7 @@ KEY-GENTIME keeps track of when the key was generated, 
because the key must be r
 
 (cl-defmethod llm-embedding-async ((provider llm-vertex) string 
vector-callback error-callback)
   (llm-vertex-refresh-key provider)
-  (let ((resp (request (format 
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
+  (request (format 
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
                                llm-vertex-gcloud-region
                                (llm-vertex-project provider)
                                llm-vertex-gcloud-region
@@ -87,8 +87,8 @@ KEY-GENTIME keeps track of when the key was generated, 
because the key must be r
                                      (cdr (assoc 'values (cdr (assoc 
'embeddings (aref (cdr (assoc 'predictions data)) 0))))))))
                 :error (cl-function (lambda (&key error-thrown data 
&allow-other-keys)
                                       (funcall error-callback
-                                               (error (format "Problem calling 
GCloud AI: %s"
-                                                     (cdr 
error-thrown)))))))))))
+                                               (error (format "Problem calling 
GCloud AI: %s (%S)"
+                                                     (cdr error-thrown) 
data)))))))
 
 (cl-defmethod llm-chat-response-async ((provider llm-vertex) prompt 
response-callback error-callback)
   (llm-vertex-refresh-key provider)
@@ -116,7 +116,7 @@ KEY-GENTIME keeps track of when the key was generated, 
because the key must be r
             request-alist))
     (when (llm-chat-prompt-max-tokens prompt)
       (push `("max_tokens" . ,(llm-chat-prompt-max-tokens prompt)) 
request-alist))
-    (let ((resp (request (format 
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
+    (request (format 
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
                                    llm-vertex-gcloud-region
                                    (llm-vertex-project provider)
                                    llm-vertex-gcloud-region
@@ -132,10 +132,10 @@ KEY-GENTIME keeps track of when the key was generated, 
because the key must be r
                       :error (cl-function (lambda (&key error-thrown data 
&allow-other-keys)
                                             (funcall error-callback 'error
                                                      (error (format "Problem 
calling GCloud AI: %s, status: %s message: %s (%s)"
-                                                                    'error(cdr 
error-thrown)
-                                                           (assoc-default 
'status (assoc-default 'error data))
-                                                           (assoc-default 
'message (assoc-default 'error data))
-                                                           data)))))))))))
+                                                                    (cdr 
error-thrown)
+                                                                    
(assoc-default 'status (assoc-default 'error data))
+                                                                    
(assoc-default 'message (assoc-default 'error data))
+                                                                    
data))))))))
 
 (provide 'llm-vertex)
 



reply via email to

[Prev in Thread] Current Thread [Next in Thread]