
Commit cc1d331

Change according to the review
1 parent c0f04b5 commit cc1d331

7 files changed, 14 additions and 12 deletions


examples/common.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -110,7 +110,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 invalid_param = true;
                 break;
             }
-            params.seed = std::stoi(argv[i]);
+            params.seed = std::stoul(argv[i]);
         } else if (arg == "-t" || arg == "--threads") {
             if (++i >= argc) {
                 invalid_param = true;
```
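With `seed` now a `uint32_t`, `std::stoi` would throw `std::out_of_range` for any seed above `INT_MAX`, while `std::stoul` accepts the full 32-bit range. A standalone sketch of the difference (not part of this commit):

```cpp
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string>

int main() {
    const std::string arg = "4294967295"; // 0xFFFFFFFF, the largest uint32_t seed

    try {
        int v = std::stoi(arg); // value exceeds INT_MAX
        printf("stoi: %d\n", v);
    } catch (const std::out_of_range &) {
        printf("stoi: out_of_range\n"); // this branch is taken
    }

    // stoul parses the full value; casting back to 32 bits preserves it
    uint32_t seed = (uint32_t) std::stoul(arg);
    printf("stoul: %u\n", seed); // prints 4294967295
}
```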

examples/embedding/embedding.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -24,7 +24,7 @@ int main(int argc, char ** argv) {
 
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
 
-    if (params.seed == -1) {
+    if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }
 
```

examples/main/main.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -94,7 +94,7 @@ int main(int argc, char ** argv) {
 
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
 
-    if (params.seed == -1) {
+    if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }
 
```

examples/perplexity/perplexity.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -136,7 +136,7 @@ int main(int argc, char ** argv) {
 
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
 
-    if (params.seed == -1) {
+    if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }
 
```

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -3034,7 +3034,7 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    if (params.seed == -1) {
+    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }
    printf("%s: seed: %u\n", __func__, params.seed);
```

llama.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -938,7 +938,7 @@ static bool kv_cache_init(
 
 struct llama_context_params llama_context_default_params() {
     struct llama_context_params result = {
-        /*.seed =*/ (unsigned int)-1,
+        /*.seed =*/ LLAMA_DEFAULT_SEED,
         /*.n_ctx =*/ 512,
         /*.n_batch =*/ 512,
         /*.gpu_layers =*/ 0,
@@ -2692,7 +2692,7 @@ struct llama_context * llama_new_context_with_model(
 
     llama_context * ctx = new llama_context(*model, model->vocab);
 
-    if (params.seed < 0) {
+    if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }
 
```
llama.h

Lines changed: 7 additions & 5 deletions
```diff
@@ -46,6 +46,8 @@
 #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
 #define LLAMA_SESSION_VERSION 1
 
+#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
+
 #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
 // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
 #define LLAMA_SUPPORTS_GPU_OFFLOAD
@@ -81,11 +83,11 @@ extern "C" {
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
     struct llama_context_params {
-        unsigned int seed; // RNG seed, -1 for random
-        int n_ctx;         // text context
-        int n_batch;       // prompt processing batch size
-        int n_gpu_layers;  // number of layers to store in VRAM
-        int main_gpu;      // the GPU that is used for scratch and small tensors
+        uint32_t seed;        // RNG seed, -1 for random
+        int32_t n_ctx;        // text context
+        int32_t n_batch;      // prompt processing batch size
+        int32_t n_gpu_layers; // number of layers to store in VRAM
+        int32_t main_gpu;     // the GPU that is used for scratch and small tensors
         float tensor_split[LLAMA_MAX_DEVICES]; // how to split layers across multiple GPUs
         // called with a progress value between 0 and 1, pass NULL to disable
         llama_progress_callback progress_callback;
```
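For downstream callers the behavior is unchanged unless they compared the seed against `-1` themselves. A minimal usage sketch against the updated header, using only identifiers visible in this diff:

```cpp
#include "llama.h"
#include <cstdio>

int main() {
    struct llama_context_params params = llama_context_default_params();

    // Fresh defaults carry the sentinel, so context creation will
    // substitute a time-based seed unless the caller overrides it.
    if (params.seed == LLAMA_DEFAULT_SEED) {
        printf("seed unset: a time-based seed will be chosen\n");
    }

    params.seed = 42; // fix the seed for reproducible sampling
    return 0;
}
```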
