diff --git a/bark.cpp b/bark.cpp
index 6e99d99..6ab0a69 100644
--- a/bark.cpp
+++ b/bark.cpp
@@ -823,7 +823,7 @@ static bool bark_model_load(std::ifstream & fin,
 #ifdef GGML_USE_CUDA
     if (n_gpu_layers > 0) {
         fprintf(stderr, "%s: using CUDA backend\n", __func__);
-        model.backend = ggml_backend_cuda_init();
+        model.backend = ggml_backend_cuda_init(0);
         if (!model.backend) {
             fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
         }
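
Note: recent ggml revisions declare the CUDA backend initializer as `ggml_backend_t ggml_backend_cuda_init(int device)`, so the call now takes the CUDA device index (device 0 in this patch). Below is a minimal sketch of the surrounding backend selection with a CPU fallback, assuming the current `ggml-backend.h`/`ggml-cuda.h` declarations; the free function `init_backend` is illustrative only and not part of bark.cpp:

```cpp
#include "ggml-backend.h"   // ggml_backend_t, ggml_backend_cpu_init()
#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"      // ggml_backend_cuda_init(int device)
#endif
#include <cstdio>

// Sketch: pick a backend the way the patched bark_model_load() does,
// falling back to the CPU backend if CUDA is unavailable or fails.
static ggml_backend_t init_backend(int n_gpu_layers) {
    ggml_backend_t backend = nullptr;
#ifdef GGML_USE_CUDA
    if (n_gpu_layers > 0) {
        fprintf(stderr, "%s: using CUDA backend\n", __func__);
        // device index 0 is hard-coded here, mirroring the patch above
        backend = ggml_backend_cuda_init(0);
        if (!backend) {
            fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
        }
    }
#endif
    if (!backend) {
        backend = ggml_backend_cpu_init();
    }
    return backend;
}
```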