
Commit df3dc13 (parent 371ecd1), authored by mudler

feat(speculative-sampling): Add speculative sampling (#200)

Signed-off-by: mudler <[email protected]>

File tree

7 files changed: +335 −117 lines


binding.cpp

+211-106
Large diffs are not rendered by default.
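
binding.cpp carries the actual C++ implementation of the new entry point. Since it is not rendered here, the following toy Go sketch illustrates the draft-then-verify loop that speculative sampling revolves around: a cheap draft model proposes n_draft tokens ahead, the target model checks them, and the longest agreeing prefix is kept, so several tokens can be committed for roughly the cost of one target pass. This is a simplified illustration with greedy acceptance and invented names (model, speculate), not the binding.cpp code.

package main

import "fmt"

// model stands in for a language model under greedy decoding:
// given a prefix, it returns the single next token.
type model func(prefix []int) int

// speculate generates maxTokens tokens after prompt. Each round the
// draft proposes nDraft tokens; the target re-derives the token at
// every proposed position and keeps matches. On the first mismatch
// the target's own token is kept and the rest of the draft is dropped.
func speculate(target, draft model, prompt []int, nDraft, maxTokens int) []int {
	out := append([]int{}, prompt...)
	for len(out)-len(prompt) < maxTokens {
		// 1. Drafting: extend the current output with nDraft cheap guesses.
		proposal := append([]int{}, out...)
		for i := 0; i < nDraft; i++ {
			proposal = append(proposal, draft(proposal))
		}
		// 2. Verification: in a real implementation this is one batched
		// forward pass of the target over all proposed positions, which
		// is why logits are needed at every position, not just the last.
		for i := len(out); i < len(proposal); i++ {
			want := target(proposal[:i])
			out = append(out, want)
			if want != proposal[i] {
				break // first disagreement: discard the rest of the draft
			}
		}
	}
	return out[:len(prompt)+maxTokens]
}

func main() {
	// Toy models that count upward; they always agree, so every
	// drafted token is accepted.
	next := func(prefix []int) int { return prefix[len(prefix)-1] + 1 }
	fmt.Println(speculate(next, next, []int{0}, 4, 8)) // [0 1 2 3 4 5 6 7 8]
}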

binding.h

+5-3
@@ -29,7 +29,7 @@ void* load_model(const char *fname,
     bool numa,
     float rope_freq_base,
     float rope_freq_scale,
-    bool mul_mat_q, const char *lora, const char *lora_base
+    bool mul_mat_q, const char *lora, const char *lora_base, bool perplexity
 );
 
 int get_embeddings(void* params_ptr, void* state_pr, float * res_embeddings);
@@ -41,8 +41,10 @@ void* llama_allocate_params(const char *prompt, int seed, int threads, int token
     int repeat_last_n, bool ignore_eos, bool memory_f16,
     int n_batch, int n_keep, const char** antiprompt, int antiprompt_count,
     float tfs_z, float typical_p, float frequency_penalty, float presence_penalty, int mirostat, float mirostat_eta, float mirostat_tau, bool penalize_nl, const char *logit_bias, const char *session_file, bool prompt_cache_all, bool mlock, bool mmap, const char *maingpu, const char *tensorsplit ,
-    bool prompt_cache_ro, const char *grammar, float rope_freq_base, float rope_freq_scale, float negative_prompt_scale, const char* negative_prompt
-);
+    bool prompt_cache_ro, const char *grammar, float rope_freq_base, float rope_freq_scale, float negative_prompt_scale, const char* negative_prompt,
+    int n_draft);
+
+int speculative_sampling(void* params_ptr, void* target_model, void* draft_model, char* result, bool debug);
 
 void llama_free_params(void* params_ptr);
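
Here target_model and draft_model are the opaque handles returned by load_model, and result is a caller-allocated output buffer, following the same convention as the existing llama_predict entry point; the Go wrapper below passes l.state and ll.state for the two handles.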

llama.cpp (vendored submodule, pointer updated)

llama.go

+64-1
@@ -41,7 +41,7 @@ func New(model string, opts ...ModelOption) (*LLama, error) {
 		C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM),
 		C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA),
 		C.float(mo.FreqRopeBase), C.float(mo.FreqRopeScale),
-		C.bool(MulMatQ), loraAdapter, loraBase,
+		C.bool(MulMatQ), loraAdapter, loraBase, C.bool(mo.Perplexity),
 	)
 
 	if result == nil {
@@ -123,6 +123,7 @@ func (l *LLama) TokenEmbeddings(tokens []int, opts ...PredictOption) ([]float32,
 		C.bool(po.PromptCacheRO),
 		C.CString(po.Grammar),
 		C.float(po.RopeFreqBase), C.float(po.RopeFreqScale), C.float(po.NegativePromptScale), C.CString(po.NegativePrompt),
+		C.int(po.NDraft),
 	)
 	ret := C.get_token_embeddings(params, l.state, myArray, C.int(len(tokens)), (*C.float)(&floats[0]))
 	if ret != 0 {
@@ -164,6 +165,7 @@ func (l *LLama) Embeddings(text string, opts ...PredictOption) ([]float32, error
 		C.bool(po.PromptCacheRO),
 		C.CString(po.Grammar),
 		C.float(po.RopeFreqBase), C.float(po.RopeFreqScale), C.float(po.NegativePromptScale), C.CString(po.NegativePrompt),
+		C.int(po.NDraft),
 	)
 
 	ret := C.get_embeddings(params, l.state, (*C.float)(&floats[0]))
@@ -202,6 +204,7 @@ func (l *LLama) Eval(text string, opts ...PredictOption) error {
 		C.bool(po.PromptCacheRO),
 		C.CString(po.Grammar),
 		C.float(po.RopeFreqBase), C.float(po.RopeFreqScale), C.float(po.NegativePromptScale), C.CString(po.NegativePrompt),
+		C.int(po.NDraft),
 	)
 	ret := C.eval(params, l.state, input)
 	if ret != 0 {
@@ -213,6 +216,64 @@ func (l *LLama) Eval(text string, opts ...PredictOption) error {
 	return nil
 }
 
+func (l *LLama) SpeculativeSampling(ll *LLama, text string, opts ...PredictOption) (string, error) {
+	po := NewPredictOptions(opts...)
+
+	if po.TokenCallback != nil {
+		setCallback(l.state, po.TokenCallback)
+	}
+
+	input := C.CString(text)
+	if po.Tokens == 0 {
+		po.Tokens = 99999999
+	}
+	out := make([]byte, po.Tokens)
+
+	reverseCount := len(po.StopPrompts)
+	reversePrompt := make([]*C.char, reverseCount)
+	var pass **C.char
+	for i, s := range po.StopPrompts {
+		cs := C.CString(s)
+		reversePrompt[i] = cs
+		pass = &reversePrompt[0]
+	}
+
+	params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
+		C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
+		C.bool(po.IgnoreEOS), C.bool(po.F16KV),
+		C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
+		C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
+		C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), C.CString(po.LogitBias),
+		C.CString(po.PathPromptCache), C.bool(po.PromptCacheAll), C.bool(po.MLock), C.bool(po.MMap),
+		C.CString(po.MainGPU), C.CString(po.TensorSplit),
+		C.bool(po.PromptCacheRO),
+		C.CString(po.Grammar),
+		C.float(po.RopeFreqBase), C.float(po.RopeFreqScale), C.float(po.NegativePromptScale), C.CString(po.NegativePrompt),
+		C.int(po.NDraft),
+	)
+	ret := C.speculative_sampling(params, l.state, ll.state, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
+	if ret != 0 {
+		return "", fmt.Errorf("inference failed")
+	}
+	res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
+
+	res = strings.TrimPrefix(res, " ")
+	res = strings.TrimPrefix(res, text)
+	res = strings.TrimPrefix(res, "\n")
+
+	for _, s := range po.StopPrompts {
+		res = strings.TrimRight(res, s)
+	}
+
+	C.llama_free_params(params)
+
+	if po.TokenCallback != nil {
+		setCallback(l.state, nil)
+	}
+
+	return res, nil
+}
+
 func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
 	po := NewPredictOptions(opts...)
 
@@ -246,6 +307,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
 		C.bool(po.PromptCacheRO),
 		C.CString(po.Grammar),
 		C.float(po.RopeFreqBase), C.float(po.RopeFreqScale), C.float(po.NegativePromptScale), C.CString(po.NegativePrompt),
+		C.int(po.NDraft),
 	)
 	ret := C.llama_predict(params, l.state, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
 	if ret != 0 {
@@ -294,6 +356,7 @@ func (l *LLama) TokenizeString(text string, opts ...PredictOption) (int32, []int
 		C.bool(po.PromptCacheRO),
 		C.CString(po.Grammar),
 		C.float(po.RopeFreqBase), C.float(po.RopeFreqScale), C.float(po.NegativePromptScale), C.CString(po.NegativePrompt),
+		C.int(po.NDraft),
 	)
 
 	tokRet := C.llama_tokenize_string(params, l.state, (*C.int)(unsafe.Pointer(&out[0]))) //, C.int(po.Tokens), true)
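
Putting the new wrapper to work looks roughly like the sketch below (hedged: the model paths are placeholders and the option set mirrors the test further down). The receiver is the target model and the first argument is the draft, matching the l.state/ll.state order passed to C.speculative_sampling. In practice the draft should be a much smaller model from the same family so that drafting stays cheap; the test reuses a single model file for both roles only for convenience.

package main

import (
	"fmt"

	llama "github.com/go-skynet/go-llama.cpp"
)

func main() {
	// Target: the large model whose output we actually want.
	target, err := llama.New("./models/target-13b.bin", // placeholder path
		llama.SetContext(128), llama.SetPerplexity(true))
	if err != nil {
		panic(err)
	}
	// Draft: a smaller, faster model that proposes candidate tokens.
	draft, err := llama.New("./models/draft-160m.bin", // placeholder path
		llama.SetContext(128), llama.SetPerplexity(true))
	if err != nil {
		panic(err)
	}

	// NDraft controls how many tokens the draft proposes per round.
	text, err := target.SpeculativeSampling(draft, "How much is 2+2?",
		llama.SetNDraft(16))
	if err != nil {
		panic(err)
	}
	fmt.Println(text)
}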

llama_test.go

+33
@@ -3,6 +3,7 @@ package llama_test
 import (
 	"os"
 
+	"github.com/go-skynet/go-llama.cpp"
 	. "github.com/go-skynet/go-llama.cpp"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -45,6 +46,38 @@ how much is 2+2?
 		Expect(text).To(ContainSubstring("4"), text)
 	})
 
+	It("speculative sampling predicts", func() {
+		if testModelPath == "" {
+			Skip("test skipped - only makes sense if the TEST_MODEL environment variable is set.")
+		}
+		model, err := New(
+			testModelPath,
+			EnableF16Memory,
+			SetContext(128),
+			SetMMap(true),
+			SetNBatch(512),
+			SetPerplexity(true),
+		)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(model).ToNot(BeNil())
+		model2, err := New(
+			testModelPath,
+			EnableF16Memory,
+			SetContext(128),
+			SetMMap(true),
+			SetNBatch(512),
+			SetPerplexity(true),
+		)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(model).ToNot(BeNil())
+		text, err := model.SpeculativeSampling(model2, `[INST] Answer to the following question:
+how much is 2+2?
+[/INST]`, llama.SetNDraft(16),
+		)
+		Expect(err).ToNot(HaveOccurred(), text)
+		Expect(text).To(ContainSubstring("4"), text)
+	})
+
 	It("tokenizes strings successfully", func() {
 		if testModelPath == "" {
 			Skip("test skipped - only makes sense if the TEST_MODEL environment variable is set.")

options.go

+14
@@ -18,11 +18,13 @@ type ModelOptions struct {
 	MulMatQ *bool
 	LoraBase string
 	LoraAdapter string
+	Perplexity bool
 }
 
 type PredictOptions struct {
 	Seed, Threads, Tokens, TopK, Repeat, Batch, NKeep int
 	TopP, Temperature, Penalty float32
+	NDraft int
 	F16KV bool
 	DebugMode bool
 	StopPrompts []string
@@ -193,6 +195,18 @@ func SetRopeFreqScale(rfs float32) PredictOption {
 	}
 }
 
+func SetNDraft(nd int) PredictOption {
+	return func(p *PredictOptions) {
+		p.NDraft = nd
+	}
+}
+
+func SetPerplexity(b bool) ModelOption {
+	return func(p *ModelOptions) {
+		p.Perplexity = b
+	}
+}
+
 func SetNegativePromptScale(nps float32) PredictOption {
 	return func(p *PredictOptions) {
 		p.NegativePromptScale = nps
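
Both new knobs follow the package's existing functional-options idiom: a Set* helper returns a closure that mutates the options struct, and the constructor folds all closures over the defaults. A minimal self-contained sketch of that idiom, trimmed to the new NDraft field (the real definitions live in options.go above):

package main

import "fmt"

// PredictOptions is abbreviated to the field added by this commit.
type PredictOptions struct{ NDraft int }

type PredictOption func(*PredictOptions)

// SetNDraft mirrors the setter added above.
func SetNDraft(nd int) PredictOption {
	return func(p *PredictOptions) { p.NDraft = nd }
}

// NewPredictOptions applies each option closure over the zero-value defaults.
func NewPredictOptions(opts ...PredictOption) PredictOptions {
	p := PredictOptions{} // zero values are the defaults
	for _, opt := range opts {
		opt(&p)
	}
	return p
}

func main() {
	po := NewPredictOptions(SetNDraft(16))
	fmt.Println(po.NDraft) // prints 16
}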

patches/1902-cuda.patch

+7-6
@@ -1,8 +1,8 @@
 diff --git a/common/common.cpp b/common/common.cpp
-index ed09fc2..ced02e8 100644
+index 3138213..af93a32 100644
 --- a/common/common.cpp
 +++ b/common/common.cpp
-@@ -1107,3 +1107,82 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
+@@ -1257,3 +1257,83 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
      fprintf(stream, "typical_p: %f # default: 1.0\n", params.typical_p);
      fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false");
  }
@@ -22,7 +22,7 @@ index ed09fc2..ced02e8 100644
 +    return lparams;
 +}
 +
-+void* load_binding_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa, float rope_freq_base, float rope_freq_scale, bool mul_mat_q, const char *lora, const char *lora_base) {
++void* load_binding_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa, float rope_freq_base, float rope_freq_scale, bool mul_mat_q, const char *lora, const char *lora_base, bool perplexity) {
 +    // load the model
 +    gpt_params * lparams = create_gpt_params(fname, lora, lora_base);
 +    llama_model * model;
@@ -35,6 +35,7 @@ index ed09fc2..ced02e8 100644
 +    lparams->embedding = embeddings;
 +    lparams->use_mlock = mlock;
 +    lparams->n_gpu_layers = n_gpu_layers;
++    lparams->perplexity = perplexity;
 +    lparams->use_mmap = mmap;
 +
 +    lparams->low_vram = low_vram;
@@ -87,10 +88,10 @@ index ed09fc2..ced02e8 100644
 +}
 \ No newline at end of file
 diff --git a/common/common.h b/common/common.h
-index 5a37968..8b09050 100644
+index 105fb09..8f60434 100644
 --- a/common/common.h
 +++ b/common/common.h
-@@ -165,3 +165,10 @@ std::string get_sortable_timestamp();
+@@ -201,3 +201,10 @@ std::string get_sortable_timestamp();
  void dump_non_result_info_yaml(
      FILE * stream, const gpt_params & params, const llama_context * lctx,
      const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
@@ -100,4 +101,4 @@ index 5a37968..8b09050 100644
 + llama_model * model;
 +};
 +
-+void* load_binding_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa, float rope_freq_base, float rope_freq_scale, bool mul_mat_q, const char *lora, const char *lora_base);
++void* load_binding_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa, float rope_freq_base, float rope_freq_scale, bool mul_mat_q, const char *lora, const char *lora_base, bool perplexity);
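
A note on why the perplexity flag is threaded all the way through load_binding_model: in llama.cpp of this vintage, gpt_params.perplexity is the knob that requests logits for every position (logits_all) when the context is created. That is presumably why the test loads both models with SetPerplexity(true): verifying a draft requires the target's logits at each proposed position, not just the final one.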
