diff --git a/talk/LICENSE b/talk/LICENSE
deleted file mode 100644
index dbc6d81..0000000
--- a/talk/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2023 Charmbracelet, Inc
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/talk/anim.go b/talk/anim.go
deleted file mode 100644
index fed89db..0000000
--- a/talk/anim.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package main
-
-import (
-    "math/rand"
-    "strings"
-    "time"
-
-    "github.com/charmbracelet/bubbles/spinner"
-    tea "github.com/charmbracelet/bubbletea"
-    "github.com/charmbracelet/lipgloss"
-    "github.com/lucasb-eyer/go-colorful"
-    "github.com/muesli/termenv"
-    "github.com/tartavull/alfredo/talk/common"
-)
-
-const (
-    charCyclingFPS  = time.Second / 22
-    colorCycleFPS   = time.Second / 5
-    maxCyclingChars = 120
-)
-
-var (
-    charRunes = []rune("0123456789abcdefABCDEF~!@#$£€%^&*()+=_")
-
-    ellipsisSpinner = spinner.Spinner{
-        Frames: []string{"", ".", "..", "..."},
-        FPS:    time.Second / 3, //nolint:gomnd
-    }
-)
-
-type charState int
-
-const (
-    charInitialState charState = iota
-    charCyclingState
-    charEndOfLifeState
-)
-
-// cyclingChar is a single animated character.
-type cyclingChar struct {
-    finalValue   rune // if < 0 cycle forever
-    currentValue rune
-    initialDelay time.Duration
-    lifetime     time.Duration
-}
-
-func (c cyclingChar) randomRune() rune {
-    return (charRunes)[rand.Intn(len(charRunes))] //nolint:gosec
-}
-
-func (c cyclingChar) state(start time.Time) charState {
-    now := time.Now()
-    if now.Before(start.Add(c.initialDelay)) {
-        return charInitialState
-    }
-    if c.finalValue > 0 && now.After(start.Add(c.initialDelay)) {
-        return charEndOfLifeState
-    }
-    return charCyclingState
-}
-
-type stepCharsMsg struct{}
-
-func stepChars() tea.Cmd {
-    return tea.Tick(charCyclingFPS, func(time.Time) tea.Msg {
-        return stepCharsMsg{}
-    })
-}
-
-type colorCycleMsg struct{}
-
-func cycleColors() tea.Cmd {
-    return tea.Tick(colorCycleFPS, func(time.Time) tea.Msg {
-        return colorCycleMsg{}
-    })
-}
-
-// anim is the model that manages the animation that displays while the
-// output is being generated.
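-// It implements tea.Model: Init schedules the character-stepping and
-// color-cycling ticks, Update advances them, and View draws the frame.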
-type anim struct {
-    start           time.Time
-    cyclingChars    []cyclingChar
-    labelChars      []cyclingChar
-    ramp            []lipgloss.Style
-    label           []rune
-    ellipsis        spinner.Model
-    ellipsisStarted bool
-    styles          common.Styles
-}
-
-func newAnim(cyclingCharsSize uint, label string, r *lipgloss.Renderer, s common.Styles) anim {
-    n := int(cyclingCharsSize)
-    if n > maxCyclingChars {
-        n = maxCyclingChars
-    }
-
-    gap := " "
-    if n == 0 {
-        gap = ""
-    }
-
-    c := anim{
-        start:    time.Now(),
-        label:    []rune(gap + label),
-        ellipsis: spinner.New(spinner.WithSpinner(ellipsisSpinner)),
-        styles:   s,
-    }
-
-    // If we're in truecolor mode (and there are enough cycling characters)
-    // color the cycling characters with a gradient ramp.
-    const minRampSize = 3
-    if n >= minRampSize && r.ColorProfile() == termenv.TrueColor {
-        c.ramp = make([]lipgloss.Style, n, n*2) // double capacity for color cycling
-        ramp := makeGradientRamp(n)
-        for i, color := range ramp {
-            c.ramp[i] = r.NewStyle().Foreground(color)
-        }
-        c.ramp = append(c.ramp, reverse(c.ramp)...) // reverse and append for color cycling
-    }
-
-    makeDelay := func(a int32, b time.Duration) time.Duration {
-        return time.Duration(rand.Int31n(a)) * (time.Millisecond * b) //nolint:gosec
-    }
-
-    makeInitialDelay := func() time.Duration {
-        return makeDelay(8, 60) //nolint:gomnd
-    }
-
-    // Initial characters that cycle forever.
-    c.cyclingChars = make([]cyclingChar, n)
-
-    for i := 0; i < n; i++ {
-        c.cyclingChars[i] = cyclingChar{
-            finalValue:   -1, // cycle forever
-            initialDelay: makeInitialDelay(),
-        }
-    }
-
-    // Label text that only cycles for a little while.
-    c.labelChars = make([]cyclingChar, len(c.label))
-
-    for i, r := range c.label {
-        c.labelChars[i] = cyclingChar{
-            finalValue:   r,
-            initialDelay: makeInitialDelay(),
-            lifetime:     makeDelay(5, 180), //nolint:gomnd
-        }
-    }
-
-    return c
-}
-
-// Init initializes the animation.
-func (anim) Init() tea.Cmd {
-    return tea.Batch(stepChars(), cycleColors())
-}
-
-// Update handles messages.
-func (a anim) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
-    var cmd tea.Cmd
-    switch msg.(type) {
-    case stepCharsMsg:
-        a.updateChars(&a.cyclingChars)
-        a.updateChars(&a.labelChars)
-
-        if !a.ellipsisStarted {
-            var eol int
-            for _, c := range a.labelChars {
-                if c.state(a.start) == charEndOfLifeState {
-                    eol++
-                }
-            }
-            if eol == len(a.label) {
-                // If our entire label has reached end of life, start the
-                // ellipsis "spinner" after a short pause.
-                a.ellipsisStarted = true
-                cmd = tea.Tick(time.Millisecond*220, func(time.Time) tea.Msg { //nolint:gomnd
-                    return a.ellipsis.Tick()
-                })
-            }
-        }
-
-        return a, tea.Batch(stepChars(), cmd)
-    case colorCycleMsg:
-        const minColorCycleSize = 2
-        if len(a.ramp) < minColorCycleSize {
-            return a, nil
-        }
-        a.ramp = append(a.ramp[1:], a.ramp[0])
-        return a, cycleColors()
-    case spinner.TickMsg:
-        var cmd tea.Cmd
-        a.ellipsis, cmd = a.ellipsis.Update(msg)
-        return a, cmd
-    default:
-        return a, nil
-    }
-}
-
-func (a *anim) updateChars(chars *[]cyclingChar) {
-    for i, c := range *chars {
-        switch c.state(a.start) {
-        case charInitialState:
-            (*chars)[i].currentValue = '.'
-        case charCyclingState:
-            (*chars)[i].currentValue = c.randomRune()
-        case charEndOfLifeState:
-            (*chars)[i].currentValue = c.finalValue
-        }
-    }
-}
-
-// View renders the animation.
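-// Cycling characters are rendered with their gradient ramp style when one
-// is set; the label characters and the ellipsis spinner follow.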
-func (a anim) View() string {
-    var b strings.Builder
-
-    for i, c := range a.cyclingChars {
-        if len(a.ramp) > i {
-            b.WriteString(a.ramp[i].Render(string(c.currentValue)))
-            continue
-        }
-        b.WriteRune(c.currentValue)
-    }
-
-    for _, c := range a.labelChars {
-        b.WriteRune(c.currentValue)
-    }
-
-    return b.String() + a.ellipsis.View()
-}
-
-func makeGradientRamp(length int) []lipgloss.Color {
-    const startColor = "#F967DC"
-    const endColor = "#6B50FF"
-    var (
-        c        = make([]lipgloss.Color, length)
-        start, _ = colorful.Hex(startColor)
-        end, _   = colorful.Hex(endColor)
-    )
-    for i := 0; i < length; i++ {
-        step := start.BlendLuv(end, float64(i)/float64(length))
-        c[i] = lipgloss.Color(step.Hex())
-    }
-    return c
-}
-
-func makeGradientText(baseStyle lipgloss.Style, str string) string {
-    const minSize = 3
-    if len(str) < minSize {
-        return str
-    }
-    b := strings.Builder{}
-    runes := []rune(str)
-    for i, c := range makeGradientRamp(len(str)) {
-        b.WriteString(baseStyle.Copy().Foreground(c).Render(string(runes[i])))
-    }
-    return b.String()
-}
-
-func reverse[T any](in []T) []T {
-    out := make([]T, len(in))
-    copy(out, in[:])
-    for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
-        out[i], out[j] = out[j], out[i]
-    }
-    return out
-}
diff --git a/talk/config.go b/talk/config.go
deleted file mode 100644
index b9cb992..0000000
--- a/talk/config.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package main
-
-import (
-    "fmt"
-    "os"
-    "path/filepath"
-    "strings"
-    "text/template"
-
-    "github.com/adrg/xdg"
-    "github.com/caarlos0/env/v8"
-    "github.com/charmbracelet/lipgloss"
-    "github.com/muesli/termenv"
-    flag "github.com/spf13/pflag"
-    "gopkg.in/yaml.v3"
-
-    "github.com/tartavull/alfredo/talk/common"
-)
-
-const configTemplate = `
-# {{ index .Help "apis" }}
-# LocalAI setup instructions: https://github.com/go-skynet/LocalAI#example-use-gpt4all-j-model
-apis:
-  openai:
-    base-url: https://api.openai.com/v1
-    models:
-      gpt-4:
-        aliases: ["4"]
-        max-input-chars: 24500
-        fallback: gpt-3.5-turbo
-      gpt-4-32k:
-        aliases: ["32k"]
-        max-input-chars: 98000
-        fallback: gpt-4
-      gpt-3.5-turbo:
-        aliases: ["35t"]
-        max-input-chars: 12250
-        fallback: gpt-3.5
-      gpt-3.5:
-        aliases: ["35"]
-        max-input-chars: 12250
-        fallback:
-  localai:
-    base-url: http://localhost:8080
-    models:
-      ggml-gpt4all-j:
-        aliases: ["local", "4all"]
-        max-input-chars: 12250
-        fallback:
-# {{ index .Help "model" }}
-default-model: gpt-4
-# {{ index .Help "max-input-chars" }}
-max-input-chars: 12250
-# {{ index .Help "format" }}
-format: false
-# {{ index .Help "quiet" }}
-quiet: false
-# {{ index .Help "temp" }}
-temp: 1.0
-# {{ index .Help "topp" }}
-topp: 1.0
-# {{ index .Help "no-limit" }}
-no-limit: false
-# {{ index .Help "prompt-args" }}
-include-prompt-args: false
-# {{ index .Help "prompt" }}
-include-prompt: 0
-# {{ index .Help "max-retries" }}
-max-retries: 5
-# {{ index .Help "fanciness" }}
-fanciness: 10
-# {{ index .Help "status-text" }}
-status-text: Generating
-# {{ index .Help "max-tokens" }}
-# max-tokens: 100
-`
-
-type config struct {
-    APIs              map[string]API `yaml:"apis"`
-    Model             string         `yaml:"default-model" env:"MODEL"`
-    Markdown          bool           `yaml:"format" env:"FORMAT"`
-    Quiet             bool           `yaml:"quiet" env:"QUIET"`
-    MaxTokens         int            `yaml:"max-tokens" env:"MAX_TOKENS"`
-    MaxInputChars     int            `yaml:"max-input-chars" env:"MAX_INPUT_CHARS"`
-    Temperature       float32        `yaml:"temp" env:"TEMP"`
-    TopP              float32        `yaml:"topp" env:"TOPP"`
-    NoLimit           bool           `yaml:"no-limit" env:"NO_LIMIT"`
-    IncludePromptArgs bool           `yaml:"include-prompt-args" env:"INCLUDE_PROMPT_ARGS"`
-    IncludePrompt     int            `yaml:"include-prompt" env:"INCLUDE_PROMPT"`
-    MaxRetries        int            `yaml:"max-retries" env:"MAX_RETRIES"`
-    Fanciness         uint           `yaml:"fanciness" env:"FANCINESS"`
-    StatusText        string         `yaml:"status-text" env:"STATUS_TEXT"`
-    Auto              bool
-    API               string
-    Models            map[string]Model
-    ShowHelp          bool
-    Prefix            string
-    Version           bool
-    Settings          bool
-    SettingsPath      string
-}
-
-func newConfig() (config, error) {
-    var c config
-    var content []byte
-
-    help := map[string]string{
-        "api":             "OpenAI compatible REST API (openai, localai).",
-        "apis":            "Aliases and endpoints for OpenAI compatible REST API.",
-        "model":           "Default model (gpt-3.5-turbo, gpt-4, ggml-gpt4all-j...).",
-        "max-input-chars": "Default character limit on input to model.",
-        "format":          "Format response as markdown.",
-        "prompt":          "Include the prompt from the arguments and stdin, truncate stdin to specified number of lines.",
-        "prompt-args":     "Include the prompt from the arguments in the response.",
-        "quiet":           "Quiet mode (hide the spinner while loading).",
-        "help":            "Show help and exit.",
-        "version":         "Show version and exit.",
-        "max-retries":     "Maximum number of times to retry API calls.",
-        "no-limit":        "Turn off the client-side limit on the size of the input into the model.",
-        "max-tokens":      "Maximum number of tokens in response.",
-        "temp":            "Temperature (randomness) of results, from 0.0 to 2.0.",
-        "topp":            "TopP, an alternative to temperature that narrows response, from 0.0 to 1.0.",
-        "fanciness":       "Number of cycling characters in the 'generating' animation.",
-        "status-text":     "Text to show while generating.",
-        "settings":        "Open settings in your $EDITOR.",
-        "auto":            "Iteratively refines its output by generating self-prompts based on previous responses.",
-    }
-
-    sp, err := xdg.ConfigFile(filepath.Join("mods", "mods.yml"))
-    if err != nil {
-        return c, err
-    }
-    c.SettingsPath = sp
-    if _, err := os.Stat(sp); os.IsNotExist(err) {
-        tmpl, err := template.New("config").Parse(strings.TrimSpace(configTemplate))
-        if err != nil {
-            return c, err
-        }
-        if err := os.MkdirAll(filepath.Dir(sp), 0o700); err != nil {
-            return c, err
-        }
-
-        f, err := os.Create(sp)
-        if err != nil {
-            return c, err
-        }
-        defer func() { _ = f.Close() }()
-
-        m := struct {
-            Config config
-            Help   map[string]string
-        }{
-            Config: c,
-            Help:   help,
-        }
-        if err := tmpl.Execute(f, m); err != nil {
-            return c, err
-        }
-    } else if err != nil {
-        return c, err
-    }
-    content, err = os.ReadFile(sp)
-    if err != nil {
-        return c, err
-    }
-    if err := yaml.Unmarshal(content, &c); err != nil {
-        return c, fmt.Errorf("%s: %w", sp, err)
-    }
-
-    ms := make(map[string]Model)
-    for ak, av := range c.APIs {
-        for mk, mv := range av.Models {
-            mv.Name = mk
-            mv.API = ak
-            ms[mk] = mv
-            for _, a := range mv.Aliases {
-                ms[a] = mv
-            }
-        }
-    }
-    c.Models = ms
-
-    err = env.ParseWithOptions(&c, env.Options{Prefix: "MODS_"})
-    if err != nil {
-        return c, err
-    }
-
-    flag.StringVarP(&c.Model, "model", "m", c.Model, help["model"])
-    flag.StringVarP(&c.API, "api", "a", c.API, help["api"])
-    flag.BoolVarP(&c.Markdown, "format", "f", c.Markdown, help["format"])
-    flag.IntVarP(&c.IncludePrompt, "prompt", "P", c.IncludePrompt, help["prompt"])
-    flag.BoolVarP(&c.IncludePromptArgs, "prompt-args", "p", c.IncludePromptArgs, help["prompt-args"])
-    flag.BoolVarP(&c.Quiet, "quiet", "q", c.Quiet, help["quiet"])
-    flag.BoolVarP(&c.Settings, "settings", "s", false, help["settings"])
-    flag.BoolVarP(&c.ShowHelp, "help", "h", false, help["help"])
-    flag.BoolVarP(&c.Version, "version", "v", false, help["version"])
-    flag.IntVar(&c.MaxRetries, "max-retries", c.MaxRetries, help["max-retries"])
-    flag.BoolVar(&c.NoLimit, "no-limit", c.NoLimit, help["no-limit"])
-    flag.IntVar(&c.MaxTokens, "max-tokens", c.MaxTokens, help["max-tokens"])
-    flag.Float32Var(&c.Temperature, "temp", c.Temperature, help["temp"])
-    flag.Float32Var(&c.TopP, "topp", c.TopP, help["topp"])
-    flag.UintVar(&c.Fanciness, "fanciness", c.Fanciness, help["fanciness"])
-    flag.StringVar(&c.StatusText, "status-text", c.StatusText, help["status-text"])
-    flag.BoolVar(&c.Auto, "auto", c.Auto, help["auto"])
-    flag.Lookup("prompt").NoOptDefVal = "-1"
-    flag.Usage = func() {
-        usage(c)
-    }
-    flag.CommandLine.SortFlags = false
-    flag.Parse()
-    c.Prefix = strings.Join(flag.Args(), " ")
-
-    return c, nil
-}
-
-func usage(c config) {
-    if c.Auto {
-        return
-    }
-
-    r := lipgloss.DefaultRenderer()
-    s := common.MakeStyles(r)
-    appName := filepath.Base(os.Args[0])
-
-    if r.ColorProfile() == termenv.TrueColor {
-        appName = makeGradientText(s.AppName, appName)
-    }
-
-    fmt.Printf("GPT on the command line. Built for pipelines.\n\n")
-    fmt.Printf(
-        "Usage:\n  %s %s\n\n",
-        appName,
-        s.CliArgs.Render("[OPTIONS] [PREFIX TERM]"),
-    )
-    fmt.Println("Options:")
-    flag.VisitAll(func(f *flag.Flag) {
-        if f.Shorthand == "" {
-            fmt.Printf(
-                "  %-42s %s\n",
-                s.Flag.Render("--"+f.Name),
-                s.FlagDesc.Render(f.Usage),
-            )
-        } else {
-            fmt.Printf(
-                "  %s%s %-38s %s\n",
-                s.Flag.Render("-"+f.Shorthand),
-                s.FlagComma,
-                s.Flag.Render("--"+f.Name),
-                s.FlagDesc.Render(f.Usage),
-            )
-        }
-    })
-    desc, example := randomExample()
-    fmt.Printf(
-        "\nExample:\n  %s\n  %s\n",
-        s.Comment.Render("# "+desc),
-        cheapHighlighting(s, example),
-    )
-}
diff --git a/talk/chat/llm_0.json b/talk/data/llm_0.json
similarity index 100%
rename from talk/chat/llm_0.json
rename to talk/data/llm_0.json
diff --git a/talk/chat/llm_1.json b/talk/data/llm_1.json
similarity index 100%
rename from talk/chat/llm_1.json
rename to talk/data/llm_1.json
diff --git a/talk/plan.md b/talk/data/plan.md
similarity index 100%
rename from talk/plan.md
rename to talk/data/plan.md
diff --git a/talk/chat/user_0.json b/talk/data/user_0.json
similarity index 100%
rename from talk/chat/user_0.json
rename to talk/data/user_0.json
diff --git a/talk/chat/user_1.json b/talk/data/user_1.json
similarity index 100%
rename from talk/chat/user_1.json
rename to talk/data/user_1.json
diff --git a/talk/examples.go b/talk/examples.go
deleted file mode 100644
index 349faea..0000000
--- a/talk/examples.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package main
-
-import (
-    "math/rand"
-    "regexp"
-
-    "github.com/tartavull/alfredo/talk/common"
-)
-
-var examples = map[string]string{
-    "Write new sections for a readme": `cat README.md | mods "write a new section to this README documenting a pdf sharing feature"`,
-    "Editorialize your video files":   `ls ~/vids | mods -f "summarize each of these titles, group them by decade" | glow`,
-    "Let GPT pick something to watch": `ls ~/vids | mods "Pick 5 action packed shows from the 80s from this list" | gum choose | xargs vlc`,
-}
-
-func randomExample() (string, string) {
-    keys := make([]string, 0, len(examples))
-    for k := range examples {
-        keys = append(keys, k)
-    }
-    desc := keys[rand.Intn(len(keys))] //nolint:gosec
-    return desc, examples[desc]
-}
-
-func cheapHighlighting(s common.Styles, code string) string {
-    code = regexp.
-        MustCompile(`"([^"\\]|\\.)*"`).
-        ReplaceAllStringFunc(code, func(x string) string {
-            return s.Quote.Render(x)
-        })
-    code = regexp.
-        MustCompile(`\|`).
-        ReplaceAllStringFunc(code, func(x string) string {
-            return s.Pipe.Render(x)
-        })
-    return code
-}
diff --git a/talk/main.go b/talk/main.go
index ab1f21f..8c9811a 100644
--- a/talk/main.go
+++ b/talk/main.go
@@ -3,41 +3,16 @@ package main
 import (
     "fmt"
     "os"
-    "runtime/debug"
 
     tea "github.com/charmbracelet/bubbletea"
-    "github.com/charmbracelet/glow/editor"
     "github.com/charmbracelet/lipgloss"
     "github.com/mattn/go-isatty"
     "github.com/muesli/termenv"
-    flag "github.com/spf13/pflag"
-)
-
-// Build vars.
-var (
-    //nolint: gochecknoglobals
-    version = "dev"
-    commit  = ""
-    date    = ""
-    builtBy = ""
+    "github.com/tartavull/alfredo/talk/common"
+    "github.com/tartavull/alfredo/talk/ui"
 )
 
-func buildVersion() string {
-    result := "mods version " + version
-    if commit != "" {
-        result = fmt.Sprintf("%s\ncommit: %s", result, commit)
-    }
-    if date != "" {
-        result = fmt.Sprintf("%s\nbuilt at: %s", result, date)
-    }
-    if builtBy != "" {
-        result = fmt.Sprintf("%s\nbuilt by: %s", result, builtBy)
-    }
-    if info, ok := debug.ReadBuildInfo(); ok && info.Main.Sum != "" {
-        result = fmt.Sprintf("%s\nmodule version: %s, checksum: %s", result, info.Main.Version, info.Main.Sum)
-    }
-    return result
-}
-
 func main() {
     renderer := lipgloss.NewRenderer(os.Stderr, termenv.WithColorCache(true))
@@ -45,40 +20,13 @@ func main() {
     if !isatty.IsTerminal(os.Stdin.Fd()) {
         opts = append(opts, tea.WithInput(nil))
     }
-    mods := newMods(renderer)
-    p := tea.NewProgram(mods, opts...)
-    m, err := p.Run()
+    s := common.MakeStyles(renderer)
+    c := common.NewCommon(0, 0, &s)
+
+    p := tea.NewProgram(ui.New(&c), opts...)
+    _, err := p.Run()
     if err != nil {
         fmt.Println(err)
         os.Exit(1)
     }
-    mods = m.(*Mods)
-    if mods.Error != nil {
-        os.Exit(1)
-    }
-    if mods.Config.Settings {
-        c := editor.Cmd(mods.Config.SettingsPath)
-        c.Stdin = os.Stdin
-        c.Stdout = os.Stdout
-        c.Stderr = os.Stderr
-        if err := c.Run(); err != nil {
-            mods.Error = &modsError{reason: "Missing $EDITOR", err: err}
-            fmt.Println(mods.ErrorView())
-            os.Exit(1)
-        }
-        fmt.Println("Wrote config file to:", mods.Config.SettingsPath)
-        os.Exit(0)
-    }
-    if mods.Config.Version {
-        fmt.Println(buildVersion())
-        os.Exit(0)
-    }
-    if mods.Config.Auto {
-        // Display a text edit?
-    }
-    if mods.Config.ShowHelp || (mods.Input == "" && mods.Config.Prefix == "") {
-        flag.Usage()
-        os.Exit(0)
-    }
-    fmt.Println(mods.FormattedOutput())
 }
diff --git a/talk/model.go b/talk/model.go
deleted file mode 100644
index 2c7a993..0000000
--- a/talk/model.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package main
-
-// Model represents the LLM model used in the API call.
-type Model struct {
-    Name     string
-    API      string
-    MaxChars int      `yaml:"max-input-chars"`
-    Aliases  []string `yaml:"aliases"`
-    Fallback string   `yaml:"fallback"`
-}
-
-// API represents an API endpoint and its models.
-type API struct {
-    BaseURL string           `yaml:"base-url"`
-    Models  map[string]Model `yaml:"models"`
-}
diff --git a/talk/mods.go b/talk/mods.go
deleted file mode 100644
index 085907d..0000000
--- a/talk/mods.go
+++ /dev/null
@@ -1,349 +0,0 @@
-package main
-
-import (
-    "bufio"
-    "context"
-    "errors"
-    "fmt"
-    "io"
-    "math"
-    "net/http"
-    "os"
-    "strings"
-    "time"
-
-    tea "github.com/charmbracelet/bubbletea"
-    "github.com/charmbracelet/lipgloss"
-    "github.com/mattn/go-isatty"
-    openai "github.com/sashabaranov/go-openai"
-    "github.com/tartavull/alfredo/talk/auto"
-    "github.com/tartavull/alfredo/talk/common"
-)
-
-const markdownPrefix = "Format the response as Markdown."
-
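-// state identifies which phase Mods is in: starting up, config loaded,
-// awaiting a completion, running auto mode, or stopped on an error.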
-type state int
-
-const (
-    stateStart state = iota
-    stateConfigLoaded
-    stateCompletion
-    stateAuto
-    stateError
-)
-
-// Mods is the Bubble Tea model that manages reading stdin and querying the
-// OpenAI API.
-type Mods struct {
-    Config   config
-    Output   string
-    Input    string
-    Error    *modsError
-    state    state
-    retries  int
-    styles   common.Styles
-    renderer *lipgloss.Renderer
-    anim     tea.Model
-    width    int
-    height   int
-    auto     *auto.Auto
-}
-
-func newMods(r *lipgloss.Renderer) *Mods {
-    s := common.MakeStyles(r)
-    c := common.NewCommon(0, 0, &s)
-    return &Mods{
-        state:    stateStart,
-        renderer: r,
-        styles:   s,
-        auto:     auto.New(&c),
-    }
-}
-
-// completionInput is a tea.Msg that wraps the content read from stdin.
-type completionInput struct{ content string }
-
-// completionOutput is a tea.Msg that wraps the content returned from openai.
-type completionOutput struct{ content string }
-
-// modsError is a wrapper around an error that adds additional context.
-type modsError struct {
-    err    error
-    reason string
-}
-
-func (m modsError) Error() string {
-    return m.err.Error()
-}
-
-// Init implements tea.Model.
-func (m *Mods) Init() tea.Cmd {
-    return m.loadConfigCmd
-}
-
-// Update implements tea.Model.
-func (m *Mods) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
-    switch msg := msg.(type) {
-    case config:
-        m.Config = msg
-        m.state = stateConfigLoaded
-        if m.Config.ShowHelp || m.Config.Version || m.Config.Settings {
-            return m, tea.Quit
-        }
-        m.anim = newAnim(m.Config.Fanciness, m.Config.StatusText, m.renderer, m.styles)
-        if !m.Config.Auto {
-            return m, tea.Batch(readStdinCmd, m.anim.Init())
-        }
-    case completionInput:
-        if msg.content == "" && m.Config.Prefix == "" {
-            return m, tea.Quit
-        }
-        if msg.content != "" {
-            m.Input = msg.content
-        }
-        m.state = stateCompletion
-        return m, m.startCompletionCmd(msg.content)
-    case completionOutput:
-        m.Output = msg.content
-        return m, tea.Quit
-    case modsError:
-        m.Error = &msg
-        m.state = stateError
-        return m, tea.Quit
-    case tea.WindowSizeMsg:
-        m.width, m.height = msg.Width, msg.Height
-    case tea.KeyMsg:
-        switch msg.String() {
-        case "esc":
-            return m, tea.Quit
-        }
-    }
-    m.state = stateAuto
-    model, cmd := m.auto.Update(msg)
-    m.auto = model.(*auto.Auto)
-    return m, cmd
-}
-
-// View implements tea.Model.
-func (m *Mods) View() string {
-    //nolint:exhaustive
-    if m.Config.Auto {
-        return m.auto.View()
-    }
-    switch m.state {
-    case stateError:
-        return m.ErrorView()
-    case stateCompletion:
-        if !m.Config.Quiet {
-            return m.anim.View()
-        }
-    case stateAuto:
-        return m.auto.View()
-    }
-    return ""
-}
-
-// ErrorView renders the currently set modsError.
-func (m Mods) ErrorView() string {
-    const maxWidth = 120
-    const horizontalPadding = 2
-    w := m.width - (horizontalPadding * 2)
-    if w > maxWidth {
-        w = maxWidth
-    }
-    s := m.renderer.NewStyle().Width(w).Padding(0, horizontalPadding)
-    return fmt.Sprintf(
-        "\n%s\n\n%s\n\n",
-        s.Render(m.styles.ErrorHeader.String(), m.Error.reason),
-        s.Render(m.styles.ErrorDetails.Render(m.Error.Error())),
-    )
-}
-
-// FormattedOutput returns the response from OpenAI with the user configured
-// prefix and standard in settings.
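-// A negative IncludePrompt includes all of stdin; a positive value truncates
-// stdin to that many lines.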
-func (m *Mods) FormattedOutput() string {
-    prefixFormat := "> %s\n\n---\n\n%s"
-    stdinFormat := "```\n%s```\n\n---\n\n%s"
-    out := m.Output
-
-    if m.Config.IncludePrompt != 0 {
-        if m.Config.IncludePrompt < 0 {
-            out = fmt.Sprintf(stdinFormat, m.Input, out)
-        }
-        scanner := bufio.NewScanner(strings.NewReader(m.Input))
-        i := 0
-        in := ""
-        for scanner.Scan() {
-            if i == m.Config.IncludePrompt {
-                break
-            }
-            in += (scanner.Text() + "\n")
-            i++
-        }
-        out = fmt.Sprintf(stdinFormat, in, out)
-    }
-
-    if m.Config.IncludePromptArgs || m.Config.IncludePrompt != 0 {
-        out = fmt.Sprintf(prefixFormat, m.Config.Prefix, out)
-    }
-
-    return out
-}
-
-func (m *Mods) retry(content string, err modsError) tea.Msg {
-    m.retries++
-    if m.retries >= m.Config.MaxRetries {
-        return err
-    }
-    wait := time.Millisecond * 100 * time.Duration(math.Pow(2, float64(m.retries))) //nolint:gomnd
-    time.Sleep(wait)
-    return completionInput{content}
-}
-
-func (m *Mods) loadConfigCmd() tea.Msg {
-    cfg, err := newConfig()
-    if err != nil {
-        return modsError{err, "There was an error in your config file."}
-    }
-    return cfg
-}
-
-func (m *Mods) startCompletionCmd(content string) tea.Cmd {
-    return func() tea.Msg {
-        var ok bool
-        var mod Model
-        var key string
-        cfg := m.Config
-        mod, ok = cfg.Models[cfg.Model]
-        if !ok {
-            if cfg.API == "" {
-                return modsError{
-                    reason: "Model " + m.styles.InlineCode.Render(cfg.Model) + " is not in the settings file.",
-                    err:    fmt.Errorf("Please specify an API endpoint with %s or configure the model in the settings: %s", m.styles.InlineCode.Render("--api"), m.styles.InlineCode.Render("mods -s")),
-                }
-            }
-            mod.Name = cfg.Model
-            mod.API = cfg.API
-            mod.MaxChars = cfg.MaxInputChars
-        }
-
-        if mod.API == "openai" {
-            key = os.Getenv("OPENAI_API_KEY")
-            if key == "" {
-                return modsError{
-                    reason: m.styles.InlineCode.Render("OPENAI_API_KEY") + " environment variable is required.",
-                    err:    fmt.Errorf("You can grab one at %s", m.styles.Link.Render("https://platform.openai.com/account/api-keys.")),
-                }
-            }
-        }
-        ccfg := openai.DefaultConfig(key)
-        api, ok := cfg.APIs[mod.API]
-        if !ok {
-            eps := make([]string, 0)
-            for k := range cfg.APIs {
-                eps = append(eps, m.styles.InlineCode.Render(k))
-            }
-            return modsError{
-                reason: fmt.Sprintf("The API endpoint %s is not configured.", m.styles.InlineCode.Render(cfg.API)),
-                err:    fmt.Errorf("Your configured API endpoints are: %s", eps),
-            }
-        }
-        ccfg.BaseURL = api.BaseURL
-        client := openai.NewClientWithConfig(ccfg)
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-        prefix := cfg.Prefix
-        if cfg.Markdown {
-            prefix = fmt.Sprintf("%s %s", prefix, markdownPrefix)
-        }
-        if prefix != "" {
-            content = strings.TrimSpace(prefix + "\n\n" + content)
-        }
-        if !cfg.NoLimit {
-            if len(content) > mod.MaxChars {
-                content = content[:mod.MaxChars]
-            }
-        }
-
-        resp, err := client.CreateChatCompletion(
-            ctx,
-            openai.ChatCompletionRequest{
-                Model:       mod.Name,
-                Temperature: noOmitFloat(cfg.Temperature),
-                TopP:        noOmitFloat(cfg.TopP),
-                MaxTokens:   cfg.MaxTokens,
-                Messages: []openai.ChatCompletionMessage{
-                    {
-                        Role:    openai.ChatMessageRoleUser,
-                        Content: content,
-                    },
-                },
-            },
-        )
-        ae := &openai.APIError{}
-        if errors.As(err, &ae) {
-            switch ae.HTTPStatusCode {
-            case http.StatusNotFound:
-                if mod.Fallback != "" {
-                    m.Config.Model = mod.Fallback
-                    return m.retry(content, modsError{err: err, reason: "OpenAI API server error."})
-                }
-                return modsError{err: err, reason: fmt.Sprintf("Missing model '%s' for API '%s'", cfg.Model, cfg.API)}
-            case http.StatusBadRequest:
-                if ae.Code == "context_length_exceeded" {
-                    pe := modsError{err: err, reason: "Maximum prompt size exceeded."}
-                    if cfg.NoLimit {
-                        return pe
-                    }
-                    return m.retry(content[:len(content)-10], pe)
-                }
-                // bad request (do not retry)
-                return modsError{err: err, reason: "OpenAI API request error."}
-            case http.StatusUnauthorized:
-                // invalid auth or key (do not retry)
-                return modsError{err: err, reason: "Invalid OpenAI API key."}
-            case http.StatusTooManyRequests:
-                // rate limiting or engine overload (wait and retry)
-                return m.retry(content, modsError{err: err, reason: "You’ve hit your OpenAI API rate limit."})
-            case http.StatusInternalServerError:
-                if mod.API == "openai" {
-                    return m.retry(content, modsError{err: err, reason: "OpenAI API server error."})
-                }
-                return modsError{err: err, reason: fmt.Sprintf("Error loading model '%s' for API '%s'", mod.Name, mod.API)}
-            default:
-                return m.retry(content, modsError{err: err, reason: "Unknown OpenAI API error."})
-            }
-        }
-
-        if err != nil {
-            return modsError{err: err, reason: "There was a problem with the OpenAI API request."}
-        }
-        return completionOutput{resp.Choices[0].Message.Content}
-    }
-}
-
-func readStdinCmd() tea.Msg {
-    if !isatty.IsTerminal(os.Stdin.Fd()) {
-        reader := bufio.NewReader(os.Stdin)
-        stdinBytes, err := io.ReadAll(reader)
-        if err != nil {
-            return modsError{err, "Unable to read stdin."}
-        }
-        return completionInput{string(stdinBytes)}
-    }
-    return completionInput{""}
-}
-
-// noOmitFloat converts a 0.0 value to a float usable by the OpenAI client
-// library, which currently uses Float32 fields in the request struct with the
-// omitempty tag. This means we need to use math.SmallestNonzeroFloat32 instead
-// of 0.0 so it doesn't get stripped from the request and replaced server side
-// with the default values.
-// Issue: https://github.com/sashabaranov/go-openai/issues/9
-func noOmitFloat(f float32) float32 {
-    if f == 0.0 {
-        return math.SmallestNonzeroFloat32
-    }
-    return f
-}
diff --git a/talk/auto/auto.go b/talk/ui/ui.go
similarity index 58%
rename from talk/auto/auto.go
rename to talk/ui/ui.go
index d3ce9cd..7ee638b 100644
--- a/talk/auto/auto.go
+++ b/talk/ui/ui.go
@@ -1,4 +1,4 @@
-package auto
+package ui
 
 import (
     "strings"
@@ -14,7 +14,7 @@ import (
 )
 
-type Auto struct {
+type UI struct {
     common *common.Common
     picked pane
     tabs   *tabs.Tabs
@@ -36,8 +36,8 @@ func (p pane) String() string {
     }[p]
 }
 
-func New(c *common.Common) *Auto {
-    a := &Auto{
+func New(c *common.Common) *UI {
+    a := &UI{
         common: c,
         picked: paneChat,
         tabs:   InitTabs(c),
@@ -49,50 +49,52 @@ func New(c *common.Common) *UI {
     return a
 }
 
-func (a *Auto) Init() tea.Cmd {
-    a.panes[0].Init()
+func (ui *UI) Init() tea.Cmd {
+    ui.panes[0].Init()
+    ui.panes[1].Init()
     return nil
 }
 
 func InitTabs(c *common.Common) *tabs.Tabs {
-    ts := make([]string, paneLast)
-    for i, b := range []pane{paneChat, paneHistory} {
-        ts[i] = b.String()
-    }
-    t := tabs.New(c, ts)
+    t := tabs.New(c, []string{"Chat", "History"})
     return t
 }
 
-func (a *Auto) SetSize(width, height int) {
+func (a *UI) SetSize(width, height int) {
     a.common.SetSize(width, height)
 }
 
-func (a *Auto) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+func (ui *UI) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
     cmds := make([]tea.Cmd, 0)
     switch msg := msg.(type) {
    case tabs.ActiveTabMsg:
-        a.picked = pane(msg)
+        ui.picked = pane(msg)
         //TODO send pane active and inactive messages
     case tea.WindowSizeMsg:
-        a.SetSize(msg.Width, msg.Height)
+        ui.SetSize(msg.Width, msg.Height)
+    case tea.KeyMsg:
+        switch msg.String() {
+        case "esc":
+            return ui, tea.Quit
+        }
     }
-    _, cmd := a.tabs.Update(msg)
+    _, cmd := ui.tabs.Update(msg)
     cmds = append(cmds, cmd)
-    _, cmd = a.panes[a.picked].Update(msg)
+    _, cmd = ui.panes[ui.picked].Update(msg)
     cmds = append(cmds, cmd)
-    return a, tea.Batch(cmds...)
+    return ui, tea.Batch(cmds...)
 }
 
-func (a *Auto) View() string {
+func (ui *UI) View() string {
     var builder strings.Builder
-    builder.WriteString(a.common.Styles.Logo.Render(" Alfredo "))
+    builder.WriteString(ui.common.Styles.Logo.Render(" Alfredo "))
     builder.WriteString("\n\n")
-    builder.WriteString(a.tabs.View())
+    builder.WriteString(ui.tabs.View())
     builder.WriteString("\n\n")
-    builder.WriteString(a.panes[a.picked].View())
+    builder.WriteString(ui.panes[ui.picked].View())
-    return lipgloss.Place(a.common.Width, a.common.Height,
+    return lipgloss.Place(ui.common.Width, ui.common.Height,
         lipgloss.Left, lipgloss.Top,
-        a.common.Styles.App.Render(builder.String()))
+        ui.common.Styles.App.Render(builder.String()))
 }
diff --git a/talk/auto/auto_test.go b/talk/ui/ui_test.go
similarity index 99%
rename from talk/auto/auto_test.go
rename to talk/ui/ui_test.go
index dc64972..42c865d 100644
--- a/talk/auto/auto_test.go
+++ b/talk/ui/ui_test.go
@@ -1,4 +1,4 @@
-package auto
+package ui
 
 import (
     "testing"