emacs-elpa-diffs
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[elpa] externals/llm b52958757a 18/34: Fix docstring wider than 80 chara


From: Andrew Hyatt
Subject: [elpa] externals/llm b52958757a 18/34: Fix docstring wider than 80 characters in llm-vertex
Date: Sat, 16 Sep 2023 01:32:48 -0400 (EDT)

branch: externals/llm
commit b52958757aefd1f1aa17f34adb2b79ccf9407afa
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>

    Fix docstring wider than 80 characters in llm-vertex
---
 llm-vertex.el | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/llm-vertex.el b/llm-vertex.el
index e51e9c8d3b..25f0be4259 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -71,8 +71,9 @@ KEY-GENTIME keeps track of when the key was generated, 
because the key must be r
 
 (defun llm-vertex--embedding (provider string vector-callback error-callback 
sync)
   "Get the embedding for STRING.
-PROVIDER, VECTOR-CALLBACK, ERROR-CALLBACK are all the same as 
`llm-embedding-async'.
-SYNC, when non-nil, will wait until the response is available to return."
+PROVIDER, VECTOR-CALLBACK, ERROR-CALLBACK are all the same as
+`llm-embedding-async'. SYNC, when non-nil, will wait until the
+response is available to return."
   (llm-vertex-refresh-key provider)
   (request (format 
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
                                llm-vertex-gcloud-region
@@ -107,8 +108,9 @@ SYNC, when non-nil, will wait until the response is 
available to return."
 
 (defun llm-vertex--chat-response (provider prompt response-callback 
error-callback sync)
   "Get the chat response for PROMPT.
-PROVIDER, RESPONSE-CALLBACK, ERROR-CALLBACK are all the same as 
`llm-chat-response-async'.
-SYNC, when non-nil, will wait until the response is available to return."
+PROVIDER, RESPONSE-CALLBACK, ERROR-CALLBACK are all the same as
+`llm-chat-response-async'. SYNC, when non-nil, will wait until
+the response is available to return."
   (llm-vertex-refresh-key provider)
   (let ((request-alist))
     (when (llm-chat-prompt-context prompt)



reply via email to

[Prev in Thread] Current Thread [Next in Thread]