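// This backend adapts the gpt4all Go bindings to LocalAI's gRPC backend
// service interface: the server loads a model and runs predictions through it.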
package main

import (
	"fmt"

	"github.com/mudler/LocalAI/pkg/grpc/base"
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
	gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
)
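
// LLM wraps a loaded gpt4all model. The embedded base.SingleThread marks the
// backend as handling one request at a time, so calls are serialized.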
type LLM struct {
	base.SingleThread

	gpt4all *gpt4all.Model
}
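
// Load instantiates the gpt4all model named in opts, forwarding the thread
// count and library search path to the bindings, and stores the handle on the
// backend. Any error from gpt4all.New is returned to the caller.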
func (llm *LLM) Load(opts *pb.ModelOptions) error {
	model, err := gpt4all.New(opts.ModelFile,
		gpt4all.SetThreads(int(opts.Threads)),
		gpt4all.SetLibrarySearchPath(opts.LibrarySearchPath))
	llm.gpt4all = model
	return err
}
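
// buildPredictOptions translates the wire-level PredictOptions into the
// functional options understood by the gpt4all bindings. The batch size is
// only forwarded when it is explicitly set (non-zero).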
func buildPredictOptions(opts *pb.PredictOptions) []gpt4all.PredictOption {
	predictOptions := []gpt4all.PredictOption{
		gpt4all.SetTemperature(float64(opts.Temperature)),
		gpt4all.SetTopP(float64(opts.TopP)),
		gpt4all.SetTopK(int(opts.TopK)),
		gpt4all.SetTokens(int(opts.Tokens)),
	}

	if opts.Batch != 0 {
		predictOptions = append(predictOptions, gpt4all.SetBatch(int(opts.Batch)))
	}
	return predictOptions
}
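
// Predict runs a single blocking completion for the prompt and returns the
// whole generated text at once.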
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.gpt4all.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
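
// PredictStream runs the same completion asynchronously, emitting each token
// on results as it is generated and closing the channel when the prediction
// finishes. The call itself returns immediately; a caller is expected to
// range over the channel, roughly (hypothetical caller, not part of this file):
//
//	results := make(chan string)
//	_ = llm.PredictStream(opts, results)
//	for token := range results {
//		fmt.Print(token)
//	}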
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
	predictOptions := buildPredictOptions(opts)

	go func() {
		// Stream every generated token to the results channel; returning
		// true tells the bindings to continue generating.
		llm.gpt4all.SetTokenCallback(func(token string) bool {
			results <- token
			return true
		})
		_, err := llm.gpt4all.Predict(opts.Prompt, predictOptions...)
		if err != nil {
			// There is no return path for errors once the goroutine is
			// running, so the failure is only logged.
			fmt.Println("err: ", err)
		}
		// Detach the callback and signal the consumer that the stream ended.
		llm.gpt4all.SetTokenCallback(nil)
		close(results)
	}()

	return nil
}
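
// A note on wiring (hedged sketch, not part of this file): LocalAI backends
// are built as standalone binaries whose main registers the implementation
// with the gRPC server, along the lines of:
//
//	var addr = flag.String("addr", "localhost:50051", "address to bind")
//
//	func main() {
//		flag.Parse()
//		if err := grpc.StartServer(*addr, &LLM{}); err != nil {
//			panic(err)
//		}
//	}
//
// where grpc refers to github.com/mudler/LocalAI/pkg/grpc; check the
// repository's backend entrypoints for the exact signature.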