experiment with google ai and fix source block
parent f0789a34ff
commit 56c3071d55
3 changed files with 113 additions and 77 deletions

@@ -7,7 +7,8 @@
;;
;; it is possible there are more so probably the most recent one is the one to use.

(setq elpaca-core-date "20241111")
(if emacs-build-time
    (setq elpaca-core-date (format-time-string "%Y%m%d" emacs-build-time)))
(defvar elpaca-installer-version 0.8)
(defvar elpaca-directory (expand-file-name "elpaca/" user-emacs-directory))
(defvar elpaca-builds-directory (expand-file-name "builds/" elpaca-directory))

init.org (67 changed lines)

@@ -50,7 +50,8 @@ one to use.
;;
;; it is possible there are more so probably the most recent one is the one to use.

(setq elpaca-core-date "20241111")
(if emacs-build-time
    (setq elpaca-core-date (format-time-string "%Y%m%d" emacs-build-time)))
(defvar elpaca-installer-version 0.8)
(defvar elpaca-directory (expand-file-name "elpaca/" user-emacs-directory))
(defvar elpaca-builds-directory (expand-file-name "builds/" elpaca-directory))

@@ -988,40 +989,74 @@ whether to put it under writing, comms or programming as it is equally

#+BEGIN_SRC emacs-lisp
  (use-package llm
    :ensure t
-   :commands (llm-chat llm-ask-about llm-ask-line llm-ask-selection))
+   :commands (llm-chat llm-ask-about llm-ask-line llm-ask-selection make-llm-openai make-llm-gemini)
+   :custom
+   (llm-vertex-gcloud-region "europe-west3")
+   :config
+   (require 'llm-openai)
+   (require 'llm-vertex)
+   )
#+END_SRC
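
With ~make-llm-gemini~ now autoloaded via ~:commands~, the Google provider from the commit title can be tried directly. A minimal sketch, assuming a hypothetical ~snamellit/gemini-api-key~ entry in pass (this config only defines the OpenAI key):

#+BEGIN_SRC emacs-lisp :tangle no
  ;; Sketch: a one-off synchronous chat against Gemini. The pass entry
  ;; below is an assumption; `llm-make-simple-chat-prompt' ships with
  ;; the llm package.
  (require 'llm-gemini)
  (llm-chat (make-llm-gemini
             :key (auth-source-pass-get 'secret "snamellit/gemini-api-key"))
            (llm-make-simple-chat-prompt "Hello from Emacs"))
#+END_SRC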

#+BEGIN_SRC emacs-lisp
  (use-package llm-openai
    :ensure nil
    :requires llm
    :commands (make-llm-openai))
#+END_SRC

#+RESULTS:
: [nil 26492 6451 233211 nil elpaca-process-queues nil nil 232000 nil]

#+RESULTS:g

#+BEGIN_SRC emacs-lisp
  (use-package ellama
    :ensure t
-   :requires (llm llm-openai)
+   :requires (llm)
    :commands (ellama-chat ellama-ask-about ellama-ask-line ellama-ask-selection)
    :custom
    (ellama-language "English")
    (ellama-provider
     (make-llm-openai
      :key (auth-source-pass-get 'secret "snamellit/openai-api-key")
      :chat-model "gpt-4o"
      ))
    (ellama-sessions-directory (expand-file-name "~/Nextcloud/ellama-sessions"))
    :bind-keymap
-   ("C-c e" . ellama-command-map))
+   ("C-c e" . ellama-command-map)
+   :init
+   (setopt ellama-providers
+           '(("openai" . (make-llm-openai
+                          :key (auth-source-pass-get 'secret "snamellit/openai-api-key")
+                          :chat-model "gpt-4o"))
+             ("unifylearn" . (make-llm-vertex
+                              :project "com-melexis-prod-unifylearn")))))
#+END_SRC

#+RESULTS:
-: [nil 26420 49222 463525 nil elpaca-process-queues nil nil 237000 nil]
+: [nil 26492 1502 118737 nil elpaca-process-queues nil nil 57000 nil]

It seems the *gpt-4o* model provides better responses. I should
investigate local models more.
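
Since ~ellama-providers~ now lists both *openai* and *unifylearn*, switching between them should not need a config edit. A sketch, assuming ellama's interactive provider selector (~ellama-provider-select~, if I read the ellama docs correctly):

#+BEGIN_SRC emacs-lisp :tangle no
  ;; Sketch: choose the active provider from the `ellama-providers'
  ;; alist defined above; normally invoked via M-x.
  (call-interactively #'ellama-provider-select)
#+END_SRC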

*** Use Gemini with ellama

This is mostly for those who want to use Google Cloud specifically; most users should use Gemini instead, which is easier to set up.

You can set it up with ~make-llm-vertex~, using the following parameters:

| parameter        | description                                                                                                              |
|------------------+--------------------------------------------------------------------------------------------------------------------------|
| :project         | Your project number from Google Cloud that has the Vertex API enabled.                                                  |
| :chat-model      | A model name from the list of Vertex's model names. This is optional, and will default to a reasonable model.           |
| :embedding-model | A model name from the list of Vertex's embedding model names. This is optional, and will default to a reasonable model. |

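Putting the table to work: the *unifylearn* provider registered above needs only ~:project~. A minimal sketch, with the model parameters left at their defaults:

#+BEGIN_SRC emacs-lisp :tangle no
  ;; Sketch: a Vertex provider with only :project set; :chat-model and
  ;; :embedding-model fall back to reasonable defaults per the table.
  (require 'llm-vertex)
  (make-llm-vertex :project "com-melexis-prod-unifylearn")
#+END_SRC
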
In addition to the provider, of which you may want several (for example, to charge against different projects), there are customizable variables:

- llm-vertex-gcloud-binary: The binary to use for generating the API key.
- llm-vertex-gcloud-region: The gcloud region to use. It's good to set this to a region near you for the best latency. Defaults to "us-central1".

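The ~llm~ block at the top of this section already covers the second variable via ~:custom~; outside ~use-package~ the equivalent one-liner is:

#+BEGIN_SRC emacs-lisp :tangle no
  ;; Equivalent of the :custom clause above: override the default
  ;; "us-central1" with a region closer to home.
  (setopt llm-vertex-gcloud-region "europe-west3")
#+END_SRC
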
If you haven't already, you must run the following command before using this:
#+BEGIN_SRC shell :tangle no
gcloud beta services identity create --service=aiplatform.googleapis.com --project=PROJECT_ID
#+END_SRC
** Dired Configuration
Enables an alternative file navigation behavior in Dired, Emacs' directory editor:
#+BEGIN_SRC emacs-lisp
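  ;; The diff truncates the body of this block, so what follows is an
  ;; assumed sketch rather than the committed code. "Alternative file
  ;; navigation" usually means `dired-find-alternate-file', which
  ;; reuses the Dired buffer instead of opening a new one per visited
  ;; directory, and which is disabled by default.
  (put 'dired-find-alternate-file 'disabled nil)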
@@ -1,5 +1,5 @@
# -*- mode: snippet -*-
-# name: source block
+# name: source block emacs-lisp
# key: <se
# --
#+BEGIN_SRC emacs-lisp
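
For reference, a complete version of the renamed snippet plausibly looks as follows; everything past the opening ~#+BEGIN_SRC~ line is an assumption, since the hunk ends there (~$0~ is yasnippet's exit point):

# -*- mode: snippet -*-
# name: source block emacs-lisp
# key: <se
# --
#+BEGIN_SRC emacs-lisp
$0
#+END_SRC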