From 8e4c24cf796624b80585bbb4aa98224ffbcbef40 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Sun, 9 Mar 2025 16:21:11 +0100
Subject: [PATCH 01/29] release workflow downgrade compiler generator versions

---
 .github/workflows/release.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 04b4a14..103580a 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -102,22 +102,22 @@ jobs:
           - {
             os: Windows,
             arch: x86_64,
-            cmake: '-G "Visual Studio 17 2022" -A "x64"'
+            cmake: '-G "Visual Studio 16 2019" -A "x64"'
           }
           - {
             os: Windows,
             arch: aarch64,
-            cmake: '-G "Visual Studio 17 2022" -A "ARM64"'
+            cmake: '-G "Visual Studio 16 2019" -A "ARM64"'
           }
           - {
             os: Windows,
             arch: x86,
-            cmake: '-G "Visual Studio 17 2022" -A "Win32"'
+            cmake: '-G "Visual Studio 16 2019" -A "Win32"'
           }
           - {
             os: Windows,
             arch: arm,
-            cmake: '-G "Visual Studio 17 2022" -A "ARM"'
+            cmake: '-G "Visual Studio 16 2019" -A "ARM"'
           }
     steps:
       - uses: actions/checkout@v4

From d7b9304bf58b716398269f13888b3852975b9378 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Sun, 9 Mar 2025 16:26:02 +0100
Subject: [PATCH 02/29] release workflow remove windows arm builds

---
 .github/workflows/release.yaml | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 103580a..f8cd6e5 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -104,21 +104,22 @@ jobs:
             arch: x86_64,
             cmake: '-G "Visual Studio 16 2019" -A "x64"'
           }
-          - {
-            os: Windows,
-            arch: aarch64,
-            cmake: '-G "Visual Studio 16 2019" -A "ARM64"'
-          }
           - {
             os: Windows,
             arch: x86,
             cmake: '-G "Visual Studio 16 2019" -A "Win32"'
           }
-          - {
-            os: Windows,
-            arch: arm,
-            cmake: '-G "Visual Studio 16 2019" -A "ARM"'
-          }
+# MSVC aarch64 builds no longer work with llama.cpp (requires clang instead)
+#          - {
+#            os: Windows,
+#            arch: aarch64,
+#            cmake: '-G "Visual Studio 16 2019" -A "ARM64"'
+#          }
+#          - {
+#            os: Windows,
+#            arch: arm,
+#            cmake: '-G "Visual Studio 16 2019" -A "ARM"'
+#          }
     steps:
       - uses: actions/checkout@v4
       - name: Build libraries

From ccbec25e9c6408c9e87a96e703502d63a239f6d0 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Sun, 9 Mar 2025 16:28:12 +0100
Subject: [PATCH 03/29] update readme

---
 README.md | 40 +++++-----------------------------------
 1 file changed, 5 insertions(+), 35 deletions(-)

diff --git a/README.md b/README.md
index 971c06a..cffdae7 100644
--- a/README.md
+++ b/README.md
@@ -16,9 +16,6 @@ Inference of Meta's LLaMA model (and others) in pure C/C++.
 2.3 [Infilling](#infilling)
 3. [Android](#importing-in-android)
 
-> [!NOTE]
-> Now with support for Llama 3, Phi-3, and flash attention
-
 ## Quick Start
 
 Access this library via Maven:
 
 ```xml
 <dependency>
     <groupId>de.kherud</groupId>
     <artifactId>llama</artifactId>
-    <version>3.4.1</version>
+    <version>4.0.0</version>
 </dependency>
 ```
 
-By default the default library artifact is built only with CPU inference support. To enable CUDA, use a `cuda12-linux-x86-64` maven classifier:
-
-```xml
-<dependency>
-    <groupId>de.kherud</groupId>
-    <artifactId>llama</artifactId>
-    <version>3.4.1</version>
-    <classifier>cuda12-linux-x86-64</classifier>
-</dependency>
-```
-
@@ -50,11 +36,7 @@
 We support CPU inference for the following platforms out of the box:
 
 - Linux x86-64, aarch64
 - MacOS x86-64, aarch64 (M-series)
-- Windows x86-64, x64, arm (32 bit)
-
-For GPU inference, we support:
-
-- Linux x86-64 with CUDA 12.1+
+- Windows x86-64, x64
 
 If any of these match your platform, you can include the Maven dependency and get started. 
@@ -88,13 +70,9 @@ All compiled libraries will be put in a resources directory matching your platfo
 
 #### Library Location
 
-This project has to load three shared libraries:
+This project has to load a single shared library `jllama`.
 
-- ggml
-- llama
-- jllama
-
-Note, that the file names vary between operating systems, e.g., `ggml.dll` on Windows, `libggml.so` on Linux, and `libggml.dylib` on macOS.
+Note that the file name varies between operating systems, e.g., `jllama.dll` on Windows, `jllama.so` on Linux, and `jllama.dylib` on macOS.
 
 The application will search in the following order in the following locations:
 
 - From the **JAR**: If any of the libraries weren't found yet, the application will try to use a prebuilt shared library. This of course only works for the [supported platforms](#no-setup-required).
 
-Not all libraries have to be in the same location.
-For example, if you already have a llama.cpp and ggml version you can install them as a system library and rely on the jllama library from the JAR.
-This way, you don't have to compile anything.
-
-#### CUDA
-
-On Linux x86-64 with CUDA 12.1+, the library assumes that your CUDA libraries are findable in `java.library.path`. If you have CUDA installed in a non-standard location, then point the `java.library.path` to the directory containing the `libcudart.so.12` library.
-
 ## Documentation
 
 ### Example
 
@@ -234,7 +204,7 @@ LlamaModel.setLogger(null, (level, message) -> {});
 ## Importing in Android
 
 You can use this library in Android project.
-1. Add java-llama.cpp as a submodule in your an droid `app` project directory
+1. Add java-llama.cpp as a submodule in your android `app` project directory
 ```shell
 git submodule add https://github.com/kherud/java-llama.cpp
 ```

From bccab5fdbfd91923828b62c96bfb0a4fed44769b Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Sun, 9 Mar 2025 16:31:37 +0100
Subject: [PATCH 04/29] release workflow remove cuda build

---
 .github/workflows/release.yaml | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index f8cd6e5..d571a2c 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -13,20 +13,21 @@ env:
   MODEL_NAME: "codellama-7b.Q2_K.gguf"
 
 jobs:
-  build-linux-cuda:
-    name: Build Linux x86-64 CUDA12
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - name: Build libraries
-        shell: bash
-        run: |
-          .github/dockcross/dockcross-manylinux_2_28-x64 .github/build_cuda_linux.sh "-DOS_NAME=Linux -DOS_ARCH=x86_64"
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: linux-libraries-cuda
-          path: ${{ github.workspace }}/src/main/resources_linux_cuda/de/kherud/llama/
+# todo: doesn't work with the newest llama.cpp version
+#  build-linux-cuda:
+#    name: Build Linux x86-64 CUDA12
+#    runs-on: ubuntu-latest
+#    steps:
+#      - uses: actions/checkout@v4
+#      - name: Build libraries
+#        shell: bash
+#        run: |
+#          .github/dockcross/dockcross-manylinux_2_28-x64 .github/build_cuda_linux.sh "-DOS_NAME=Linux -DOS_ARCH=x86_64"
+#      - name: Upload artifacts
+#        uses: actions/upload-artifact@v4
+#        with:
+#          name: linux-libraries-cuda
+#          path: ${{ github.workspace }}/src/main/resources_linux_cuda/de/kherud/llama/
 
   build-linux-docker:
     name: Build ${{ matrix.target.os }}-${{ matrix.target.arch }}

   publish:
     if: ${{ github.event_name != 
'workflow_dispatch' || github.event.inputs.build_only == 'no' }}
-    needs: [ test-linux,build-macos-native,build-win-native,build-linux-cuda ]
+    needs: [ test-linux,build-macos-native,build-win-native ] #,build-linux-cuda
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4

From 0b84ea49d9a2d02cb283ea70b636e4c64e0b5c82 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Sun, 9 Mar 2025 16:36:17 +0100
Subject: [PATCH 05/29] minor release workflow fix

---
 .github/workflows/release.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index d571a2c..ff566ad 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -204,10 +204,10 @@ jobs:
         pattern: "*-libraries"
         merge-multiple: true
         path: ${{ github.workspace }}/src/main/resources/de/kherud/llama/
-      - uses: actions/download-artifact@v4
-        with:
-          name: linux-libraries-cuda
-          path: ${{ github.workspace }}/src/main/resources_linux_cuda/de/kherud/llama/
+#      - uses: actions/download-artifact@v4
+#        with:
+#          name: linux-libraries-cuda
+#          path: ${{ github.workspace }}/src/main/resources_linux_cuda/de/kherud/llama/
       - name: Set up Maven Central Repository
         uses: actions/setup-java@v3
         with:

From a1a74746a3ceca924db1397ae57ff9a339346544 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Sun, 9 Mar 2025 16:45:27 +0100
Subject: [PATCH 06/29] minor doc comment fix

---
 src/main/java/de/kherud/llama/ModelParameters.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/main/java/de/kherud/llama/ModelParameters.java b/src/main/java/de/kherud/llama/ModelParameters.java
index 8615bd5..e4947d4 100644
--- a/src/main/java/de/kherud/llama/ModelParameters.java
+++ b/src/main/java/de/kherud/llama/ModelParameters.java
@@ -584,7 +584,7 @@ public ModelParameters setCacheTypeV(CacheType type) {
     }
 
     /**
-     * Set KV cache defragmentation threshold (default: 0.1, < 0 - disabled).
+     * Set KV cache defragmentation threshold (default: 0.1, &lt; 0 - disabled).
     */
    public ModelParameters setDefragThold(float defragThold) {
        parameters.put("--defrag-thold", String.valueOf(defragThold));
@@ -640,7 +640,7 @@ public ModelParameters setNuma(NumaStrategy numaStrategy) {
    }
 
    /**
-     * Set comma-separated list of devices to use for offloading (none = don't offload).
+     * Set comma-separated list of devices to use for offloading &lt;dev1,dev2,..&gt; (none = don't offload). 
*/ public ModelParameters setDevices(String devices) { parameters.put("--device", devices); From ca148c87ecaa483288f412aa23e53e94b9f09446 Mon Sep 17 00:00:00 2001 From: Konstantin Herud Date: Sun, 9 Mar 2025 17:32:45 +0100 Subject: [PATCH 07/29] update readme llama.cpp tag --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cffdae7..32f555e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ ![Java 11+](https://img.shields.io/badge/Java-11%2B-informational) -![llama.cpp b3534](https://img.shields.io/badge/llama.cpp-%23b3534-informational) +![llama.cpp b4831](https://img.shields.io/badge/llama.cpp-%23b4831-informational) # Java Bindings for [llama.cpp](https://github.com/ggerganov/llama.cpp) From 71373681d3b460bb384750d3e6fd9f17e6055089 Mon Sep 17 00:00:00 2001 From: Vaijanath Rao Date: Wed, 12 Mar 2025 12:19:56 -0700 Subject: [PATCH 08/29] adding re-ranking --- pom.xml | 34 ++++-- src/main/cpp/jllama.cpp | 111 +++++++++++++++++- src/main/cpp/jllama.h | 7 ++ src/main/java/de/kherud/llama/LlamaModel.java | 3 + .../java/de/kherud/llama/LlamaModelTest.java | 20 ++++ 5 files changed, 163 insertions(+), 12 deletions(-) diff --git a/pom.xml b/pom.xml index c081e19..fba7eb4 100644 --- a/pom.xml +++ b/pom.xml @@ -1,14 +1,16 @@ - 4.0.0 de.kherud llama - 4.0.0 + 4.0.1 jar ${project.groupId}:${project.artifactId} - Java Bindings for llama.cpp - A Port of Facebook's LLaMA model in C/C++. + Java Bindings for llama.cpp - A Port of Facebook's LLaMA model + in C/C++. https://github.com/kherud/java-llama.cpp @@ -39,7 +41,8 @@ ossrh - https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ + + https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/ @@ -62,6 +65,7 @@ 24.1.0 compile + @@ -71,17 +75,21 @@ maven-compiler-plugin 3.13.0 - + gpu compile - compile + + compile + -h src/main/cpp - ${project.build.outputDirectory}_cuda + + ${project.build.outputDirectory}_cuda @@ -98,10 +106,12 @@ copy-resources - ${project.build.outputDirectory}_cuda + + ${project.build.outputDirectory}_cuda - ${basedir}/src/main/resources_linux_cuda/ + + ${basedir}/src/main/resources_linux_cuda/ **/*.* @@ -176,7 +186,8 @@ maven-jar-plugin 3.4.2 - + cuda package @@ -185,7 +196,8 @@ cuda12-linux-x86-64 - ${project.build.outputDirectory}_cuda + + ${project.build.outputDirectory}_cuda diff --git a/src/main/cpp/jllama.cpp b/src/main/cpp/jllama.cpp index 0db026e..9fafb6f 100644 --- a/src/main/cpp/jllama.cpp +++ b/src/main/cpp/jllama.cpp @@ -112,6 +112,26 @@ char **parse_string_array(JNIEnv *env, const jobjectArray string_array, const js return result; } +std::vector parse_string_array_for_rerank(JNIEnv *env, const jobjectArray string_array, const jsize length) { + std::vector result; + result.reserve(length); // Reserve memory for efficiency + + for (jsize i = 0; i < length; i++) { + jstring javaString = static_cast(env->GetObjectArrayElement(string_array, i)); + if (javaString == nullptr) continue; + + const char *cString = env->GetStringUTFChars(javaString, nullptr); + if (cString != nullptr) { + result.emplace_back(cString); // Add to vector + env->ReleaseStringUTFChars(javaString, cString); + } + + env->DeleteLocalRef(javaString); // Avoid memory leaks + } + + return result; +} + void free_string_array(char **array, jsize length) { if (array != nullptr) { for (jsize i = 0; i < length; i++) { @@ -239,6 +259,7 @@ JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void *reserved) { cc_integer = env->GetMethodID(c_integer, "", "(I)V"); cc_float = 
env->GetMethodID(c_float, "<init>", "(F)V");
 
+
     if (!(cc_output && cc_hash_map && cc_integer && cc_float)) {
         goto error;
     }
@@ -634,7 +655,6 @@ JNIEXPORT jfloatArray JNICALL Java_de_kherud_llama_LlamaModel_embed(JNIEnv *env,
 
     json error = nullptr;
 
     server_task_result_ptr result = ctx_server->queue_results.recv(id_task);
-    ctx_server->queue_results.remove_waiting_task_id(id_task);
 
     json response_str = result->to_json();
     if (result->is_error()) {
@@ -643,6 +663,11 @@ JNIEXPORT jfloatArray JNICALL Java_de_kherud_llama_LlamaModel_embed(JNIEnv *env,
         env->ThrowNew(c_llama_error, response.c_str());
         return nullptr;
     }
+
+    if (result->is_stop()) {
+        ctx_server->queue_results.remove_waiting_task_id(id_task);
+    }
+
 
     const auto out_res = result->to_json();
 
@@ -679,6 +704,90 @@ JNIEXPORT jfloatArray JNICALL Java_de_kherud_llama_LlamaModel_embed(JNIEnv *env,
     return j_embedding;
 }
 
+JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jobject obj, jstring jprompt, jobjectArray documents) {
+    jlong server_handle = env->GetLongField(obj, f_model_pointer);
+    auto *ctx_server = reinterpret_cast<server_context *>(server_handle); // NOLINT(*-no-int-to-ptr)
+
+    if (!ctx_server->params_base.reranking || ctx_server->params_base.embedding) {
+        env->ThrowNew(c_llama_error,
+                      "This server does not support reranking. Start it with `--reranking` and without `--embedding`");
+        return nullptr;
+    }
+
+
+    const std::string prompt = parse_jstring(env, jprompt);
+
+
+
+    const auto tokenized_query = tokenize_mixed(ctx_server->vocab, prompt, true, true);
+
+    json responses = json::array();
+    bool error = false;
+
+    std::vector<server_task> tasks;
+    const jsize argc = env->GetArrayLength(documents);
+    std::vector<std::string> documentsArray = parse_string_array_for_rerank(env, documents, argc);
+
+    std::vector<llama_tokens> tokenized_docs = tokenize_input_prompts(ctx_server->vocab, documentsArray, true, true);
+
+    tasks.reserve(tokenized_docs.size());
+    for (size_t i = 0; i < tokenized_docs.size(); i++) {
+        server_task task = server_task(SERVER_TASK_TYPE_RERANK);
+        task.id = ctx_server->queue_tasks.get_new_id();
+        task.index = i;
+        task.prompt_tokens = format_rerank(ctx_server->vocab, tokenized_query, tokenized_docs[i]);
+        tasks.push_back(task);
+    }
+    ctx_server->queue_results.add_waiting_tasks(tasks);
+    ctx_server->queue_tasks.post(tasks);
+
+    // get the result
+    std::unordered_set<int> task_ids = server_task::get_list_id(tasks);
+    std::vector<server_task_result_ptr> results(task_ids.size());
+
+    // Create a new HashMap instance
+    jobject o_probabilities = env->NewObject(c_hash_map, cc_hash_map);
+    if (o_probabilities == nullptr) {
+        env->ThrowNew(c_llama_error, "Failed to create HashMap object.");
+        return nullptr;
+    }
+
+    for (int i = 0; i < (int)task_ids.size(); i++) {
+        server_task_result_ptr result = ctx_server->queue_results.recv(task_ids);
+        if (result->is_error()) {
+            std::string response = result->to_json()["message"].get<std::string>();
+            for (const int id_task : task_ids) {
+                ctx_server->queue_results.remove_waiting_task_id(id_task);
+            }
+            env->ThrowNew(c_llama_error, response.c_str());
+            return nullptr;
+        }
+
+        const auto out_res = result->to_json();
+
+        std::cout << out_res.dump(4) << std::endl;
+
+        if (result->is_stop()) {
+            for (const int id_task : task_ids) {
+                ctx_server->queue_results.remove_waiting_task_id(id_task);
+            }
+        }
+
+        int index = out_res["index"].get<int>();
+        float score = out_res["score"].get<float>();
+        std::string tok_str = documentsArray[index];
+        jstring jtok_str = env->NewStringUTF(tok_str.c_str());
+
+        jobject jprob = env->NewObject(c_float, cc_float, score);
env->CallObjectMethod(o_probabilities, m_map_put, jtok_str, jprob); + env->DeleteLocalRef(jtok_str); + env->DeleteLocalRef(jprob); + } + jbyteArray jbytes = parse_jbytes(env, prompt); + return env->NewObject(c_output, cc_output, jbytes, o_probabilities, true); + +} + JNIEXPORT jintArray JNICALL Java_de_kherud_llama_LlamaModel_encode(JNIEnv *env, jobject obj, jstring jprompt) { jlong server_handle = env->GetLongField(obj, f_model_pointer); auto *ctx_server = reinterpret_cast(server_handle); // NOLINT(*-no-int-to-ptr) diff --git a/src/main/cpp/jllama.h b/src/main/cpp/jllama.h index 63d95b7..01e4d20 100644 --- a/src/main/cpp/jllama.h +++ b/src/main/cpp/jllama.h @@ -84,6 +84,13 @@ JNIEXPORT void JNICALL Java_de_kherud_llama_LlamaModel_releaseTask(JNIEnv *, job */ JNIEXPORT jbyteArray JNICALL Java_de_kherud_llama_LlamaModel_jsonSchemaToGrammarBytes(JNIEnv *, jclass, jstring); +/* + * Class: de_kherud_llama_LlamaModel + * Method: rerank + * Signature: (Ljava/lang/String;[Ljava/lang/String;)Lde/kherud/llama/LlamaOutput; + */ +JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *, jobject, jstring, jobjectArray); + #ifdef __cplusplus } #endif diff --git a/src/main/java/de/kherud/llama/LlamaModel.java b/src/main/java/de/kherud/llama/LlamaModel.java index 7749b32..ffa9675 100644 --- a/src/main/java/de/kherud/llama/LlamaModel.java +++ b/src/main/java/de/kherud/llama/LlamaModel.java @@ -5,6 +5,7 @@ import java.lang.annotation.Native; import java.nio.charset.StandardCharsets; +import java.util.List; import java.util.function.BiConsumer; /** @@ -137,4 +138,6 @@ public void close() { public static String jsonSchemaToGrammar(String schema) { return new String(jsonSchemaToGrammarBytes(schema), StandardCharsets.UTF_8); } + + public native LlamaOutput rerank(String query, String... documents); } diff --git a/src/test/java/de/kherud/llama/LlamaModelTest.java b/src/test/java/de/kherud/llama/LlamaModelTest.java index f2e931b..6481f09 100644 --- a/src/test/java/de/kherud/llama/LlamaModelTest.java +++ b/src/test/java/de/kherud/llama/LlamaModelTest.java @@ -158,6 +158,26 @@ public void testEmbedding() { float[] embedding = model.embed(prefix); Assert.assertEquals(4096, embedding.length); } + + + @Ignore + /** + * To run this test download the model from here https://huggingface.co/mradermacher/jina-reranker-v1-tiny-en-GGUF/tree/main + * remove .enableEmbedding() from model setup and add .enableReRanking() and then enable the test. + */ + public void testReRanking() { + + String query = "Machine learning is"; + String [] TEST_DOCUMENTS = new String[] { + "A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.", + "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.", + "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.", + "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. 
Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine."
+		};
+		LlamaOutput llamaOutput = model.rerank(query, TEST_DOCUMENTS[0], TEST_DOCUMENTS[1], TEST_DOCUMENTS[2], TEST_DOCUMENTS[3]  );
+		
+		System.out.println(llamaOutput);
+	}
 
 	@Test
 	public void testTokenization() {

From bccab5fdbfd91923828b62c96bfb0a4fed44769b Mon Sep 17 00:00:00 2001
From: Vaijanath Rao
Date: Wed, 12 Mar 2025 19:08:02 -0700
Subject: [PATCH 09/29] moving reranking to its own test.

---
 .github/workflows/ci.yml                      | 14 +++++-
 .../de/kherud/llama/RerankingModelTest.java   | 47 +++++++++++++++++++
 2 files changed, 59 insertions(+), 2 deletions(-)
 create mode 100644 src/test/java/de/kherud/llama/RerankingModelTest.java

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 631fc86..9e913a9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -6,6 +6,8 @@ on:
 env:
   MODEL_URL: https://huggingface.co/TheBloke/CodeLlama-7B-GGUF/resolve/main/codellama-7b.Q2_K.gguf
   MODEL_NAME: codellama-7b.Q2_K.gguf
+  RERANKING_MODEL_URL: https://huggingface.co/gpustack/jina-reranker-v1-tiny-en-GGUF/resolve/main/jina-reranker-v1-tiny-en-Q4_0.gguf
+  RERANKING_MODEL_NAME: jina-reranker-v1-tiny-en-Q4_0.gguf
 
 jobs:
 
   build-and-test-linux:
@@ -21,8 +23,10 @@ jobs:
       run: |
         mvn compile
         .github/build.sh -DLLAMA_VERBOSE=ON
-      - name: Download model
+      - name: Download text generation model
       run: curl -L ${MODEL_URL} --create-dirs -o models/${MODEL_NAME}
+      - name: Download reranking model
+        run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME}
       - name: Run tests
         run: mvn test
       - if: failure()
@@ -53,8 +57,11 @@ jobs:
       run: |
         mvn compile
         .github/build.sh ${{ matrix.target.cmake }}
-      - name: Download model
+      - name: Download text generation model
       run: curl -L ${MODEL_URL} --create-dirs -o models/${MODEL_NAME}
+      - name: Download reranking model
+        run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME}
+
       - name: Run tests
         run: mvn test
       - if: failure()
@@ -79,6 +86,9 @@ jobs:
         .github\build.bat -DLLAMA_VERBOSE=ON
       - name: Download model
         run: curl -L $env:MODEL_URL --create-dirs -o models/$env:MODEL_NAME
+      - name: Download reranking model
+        run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME}
+
       - name: Run tests
         run: mvn test
       - if: failure()
diff --git a/src/test/java/de/kherud/llama/RerankingModelTest.java b/src/test/java/de/kherud/llama/RerankingModelTest.java
new file mode 100644
index 0000000..38ca7e2
--- /dev/null
+++ b/src/test/java/de/kherud/llama/RerankingModelTest.java
@@ -0,0 +1,47 @@
+package de.kherud.llama;
+
+import java.io.*;
+import java.util.*;
+import java.util.regex.Pattern;
+
+import de.kherud.llama.args.LogFormat;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class RerankingModelTest {
+
+	private static LlamaModel model;
+
+	@BeforeClass
+	public static void setup() {
+		model = new LlamaModel(
+				new ModelParameters().setCtxSize(128).setModel("models/jina-reranker-v1-tiny-en.Q4_K_M.gguf")
+						.setGpuLayers(43).enableReranking().enableLogTimestamps().enableLogPrefix());
+	}
+
+	@AfterClass
+	public static void tearDown() {
+		if (model != null) {
+			model.close();
+		}
+	}
+
+	@Test
+	public void testReRanking() {
+
+		String query = "Machine learning is";
+		String[] TEST_DOCUMENTS = new String[] {
+				"A machine is a physical system that uses power to apply forces and control movement to perform an action. 
The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.", + "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.", + "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.", + "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine." }; + LlamaOutput llamaOutput = model.rerank(query, TEST_DOCUMENTS[0], TEST_DOCUMENTS[1], TEST_DOCUMENTS[2], + TEST_DOCUMENTS[3]); + + System.out.println(llamaOutput); + } + +} From 01a6f83726cbae097fb282e6095f12e1dc10da4b Mon Sep 17 00:00:00 2001 From: Vaijanath Rao Date: Wed, 12 Mar 2025 20:46:15 -0700 Subject: [PATCH 10/29] updating the workflow and reranking --- .github/workflows/ci.yml | 8 ++++++-- src/test/java/de/kherud/llama/RerankingModelTest.java | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9e913a9..9ff9dfb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,6 +27,8 @@ jobs: run: curl -L ${MODEL_URL} --create-dirs -o models/${MODEL_NAME} - name: Download reranking model run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME} + - name: List files in models directory + run: ls -l models/ - name: Run tests run: mvn test - if: failure() @@ -61,7 +63,8 @@ jobs: run: curl -L ${MODEL_URL} --create-dirs -o models/${MODEL_NAME} - name: Download reranking model run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME} - + - name: List files in models directory + run: ls -l models/ - name: Run tests run: mvn test - if: failure() @@ -88,7 +91,8 @@ jobs: run: curl -L $env:MODEL_URL --create-dirs -o models/$env:MODEL_NAME - name: Download reranking model run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME} - + - name: List files in models directory + run: ls -l models/ - name: Run tests run: mvn test - if: failure() diff --git a/src/test/java/de/kherud/llama/RerankingModelTest.java b/src/test/java/de/kherud/llama/RerankingModelTest.java index 38ca7e2..69adb7f 100644 --- a/src/test/java/de/kherud/llama/RerankingModelTest.java +++ b/src/test/java/de/kherud/llama/RerankingModelTest.java @@ -18,7 +18,7 @@ public class RerankingModelTest { @BeforeClass public static void setup() { model = new LlamaModel( - new ModelParameters().setCtxSize(128).setModel("models/jina-reranker-v1-tiny-en.Q4_K_M.gguf") + new ModelParameters().setCtxSize(128).setModel("models/jina-reranker-v1-tiny-en-Q4_0.gguf") .setGpuLayers(43).enableReranking().enableLogTimestamps().enableLogPrefix()); } From 1685c3e5044fa4012595d5b7ea113da41f6c0ee8 Mon Sep 17 00:00:00 2001 From: Vaijanath Rao Date: Wed, 12 Mar 2025 20:57:02 -0700 Subject: [PATCH 11/29] updating windows build --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 
9ff9dfb..a15f809 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -90,7 +90,7 @@ jobs:
       - name: Download model
         run: curl -L $env:MODEL_URL --create-dirs -o models/$env:MODEL_NAME
       - name: Download reranking model
-        run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME}
+        run: curl -L $env:RERANKING_MODEL_URL --create-dirs -o models/$env:RERANKING_MODEL_NAME
       - name: List files in models directory
         run: ls -l models/
       - name: Run tests

From 06b11a705669ac09864338b9c55364cf886b7e1e Mon Sep 17 00:00:00 2001
From: Vaijanath Rao
Date: Wed, 12 Mar 2025 21:36:17 -0700
Subject: [PATCH 12/29] updated the test.

---
 .../de/kherud/llama/RerankingModelTest.java   | 33 +++++++++++++++----
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/src/test/java/de/kherud/llama/RerankingModelTest.java b/src/test/java/de/kherud/llama/RerankingModelTest.java
index 69adb7f..8145829 100644
--- a/src/test/java/de/kherud/llama/RerankingModelTest.java
+++ b/src/test/java/de/kherud/llama/RerankingModelTest.java
@@ -1,14 +1,10 @@
 package de.kherud.llama;
 
-import java.io.*;
-import java.util.*;
-import java.util.regex.Pattern;
+import java.util.Map;
 
-import de.kherud.llama.args.LogFormat;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class RerankingModelTest {
@@ -41,7 +37,32 @@ public void testReRanking() {
 		LlamaOutput llamaOutput = model.rerank(query, TEST_DOCUMENTS[0], TEST_DOCUMENTS[1], TEST_DOCUMENTS[2],
 				TEST_DOCUMENTS[3]);
 
-		System.out.println(llamaOutput);
+		Map<String, Float> rankedDocumentsMap = llamaOutput.probabilities;
+		Assert.assertTrue(rankedDocumentsMap.size() == TEST_DOCUMENTS.length);
+
+		// Finding the most and least relevant documents
+		String mostRelevantDoc = null;
+		String leastRelevantDoc = null;
+		// -Float.MAX_VALUE is the most negative float; Float.MIN_VALUE is the smallest positive value and would never be beaten by negative scores
+		float maxScore = -Float.MAX_VALUE;
+		float minScore = Float.MAX_VALUE;
+
+		for (Map.Entry<String, Float> entry : rankedDocumentsMap.entrySet()) {
+			if (entry.getValue() > maxScore) {
+				maxScore = entry.getValue();
+				mostRelevantDoc = entry.getKey();
+			}
+			if (entry.getValue() < minScore) {
+				minScore = entry.getValue();
+				leastRelevantDoc = entry.getKey();
+			}
+		}
+
+		// Assertions
+		Assert.assertTrue(maxScore > minScore);
+		Assert.assertEquals("Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.", mostRelevantDoc);
+		Assert.assertEquals("Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine.", leastRelevantDoc);
+
+	}
 
 }

From faa494e886824a888ea12cf388c9f45229ff35e7 Mon Sep 17 00:00:00 2001
From: Vaijanath Rao
Date: Thu, 13 Mar 2025 15:41:56 -0700
Subject: [PATCH 13/29] removed std print and added ranking test.
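
A minimal sketch of how the new Java-side API introduced here might be called (the model path is a placeholder, the model has to be loaded with `enableReranking()`, and passing `true` requests the documents sorted by descending relevance score):

```java
import java.util.List;

import de.kherud.llama.LlamaModel;
import de.kherud.llama.ModelParameters;
import de.kherud.llama.Pair;

public class RerankExample {
    public static void main(String... args) {
        ModelParameters params = new ModelParameters()
                .setModel("models/jina-reranker-v1-tiny-en-Q4_0.gguf") // placeholder path
                .enableReranking();
        try (LlamaModel model = new LlamaModel(params)) {
            List<Pair<String, Float>> ranked = model.rerank(true, "Machine learning is",
                    "Machine learning is a field of study in artificial intelligence.",
                    "Paris is the capital of France.");
            // highest-scoring document comes first
            for (Pair<String, Float> document : ranked) {
                System.out.printf("%.3f  %s%n", document.getValue(), document.getKey());
            }
        }
    }
}
```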
---
 src/main/cpp/jllama.cpp                       |  2 -
 .../java/de/kherud/llama/LlamaIterator.java   |  3 ++
 src/main/java/de/kherud/llama/LlamaModel.java | 25 +++++++++-
 src/main/java/de/kherud/llama/Pair.java       | 48 +++++++++++++++++++
 .../de/kherud/llama/RerankingModelTest.java   | 29 ++++++++---
 5 files changed, 97 insertions(+), 10 deletions(-)
 create mode 100644 src/main/java/de/kherud/llama/Pair.java

diff --git a/src/main/cpp/jllama.cpp b/src/main/cpp/jllama.cpp
index 9fafb6f..b0242c3 100644
--- a/src/main/cpp/jllama.cpp
+++ b/src/main/cpp/jllama.cpp
@@ -765,8 +765,6 @@ JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jo
 
         const auto out_res = result->to_json();
 
-        std::cout << out_res.dump(4) << std::endl;
-
         if (result->is_stop()) {
             for (const int id_task : task_ids) {
                 ctx_server->queue_results.remove_waiting_task_id(id_task);
diff --git a/src/main/java/de/kherud/llama/LlamaIterator.java b/src/main/java/de/kherud/llama/LlamaIterator.java
index fdff993..cb1c5c2 100644
--- a/src/main/java/de/kherud/llama/LlamaIterator.java
+++ b/src/main/java/de/kherud/llama/LlamaIterator.java
@@ -35,6 +35,9 @@ public LlamaOutput next() {
         }
         LlamaOutput output = model.receiveCompletion(taskId);
         hasNext = !output.stop;
+        if (output.stop) {
+            model.releaseTask(taskId);
+        }
         return output;
     }
diff --git a/src/main/java/de/kherud/llama/LlamaModel.java b/src/main/java/de/kherud/llama/LlamaModel.java
index ffa9675..9ed86d0 100644
--- a/src/main/java/de/kherud/llama/LlamaModel.java
+++ b/src/main/java/de/kherud/llama/LlamaModel.java
@@ -5,7 +5,9 @@
 
 import java.lang.annotation.Native;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.function.BiConsumer;
 
 /**
@@ -131,7 +133,7 @@ public void close() {
 
 	private native void delete();
 
-	private native void releaseTask(int taskId);
+	native void releaseTask(int taskId);
 
 	private static native byte[] jsonSchemaToGrammarBytes(String schema);
 
@@ -139,5 +141,26 @@ public static String jsonSchemaToGrammar(String schema) {
 		return new String(jsonSchemaToGrammarBytes(schema), StandardCharsets.UTF_8);
 	}
 
+	public List<Pair<String, Float>> rerank(boolean reRank, String query, String ... documents) {
+		LlamaOutput output = rerank(query, documents);
+
+		Map<String, Float> scoredDocumentMap = output.probabilities;
+
+		List<Pair<String, Float>> rankedDocuments = new ArrayList<>();
+
+		if (reRank) {
+			// Sort in descending order based on Float values
+			scoredDocumentMap.entrySet()
+				.stream()
+				.sorted((a, b) -> Float.compare(b.getValue(), a.getValue())) // Descending order
+				.forEach(entry -> rankedDocuments.add(new Pair<>(entry.getKey(), entry.getValue())));
+		} else {
+			// Copy without sorting
+			scoredDocumentMap.forEach((key, value) -> rankedDocuments.add(new Pair<>(key, value)));
+		}
+
+		return rankedDocuments;
+	}
+
+	public native LlamaOutput rerank(String query, String... 
documents); } diff --git a/src/main/java/de/kherud/llama/Pair.java b/src/main/java/de/kherud/llama/Pair.java new file mode 100644 index 0000000..48ac648 --- /dev/null +++ b/src/main/java/de/kherud/llama/Pair.java @@ -0,0 +1,48 @@ +package de.kherud.llama; + +import java.util.Objects; + +public class Pair { + + private final K key; + private final V value; + + public Pair(K key, V value) { + this.key = key; + this.value = value; + } + + public K getKey() { + return key; + } + + public V getValue() { + return value; + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Pair other = (Pair) obj; + return Objects.equals(key, other.key) && Objects.equals(value, other.value); + } + + @Override + public String toString() { + return "Pair [key=" + key + ", value=" + value + "]"; + } + + + + +} diff --git a/src/test/java/de/kherud/llama/RerankingModelTest.java b/src/test/java/de/kherud/llama/RerankingModelTest.java index 8145829..60d32bd 100644 --- a/src/test/java/de/kherud/llama/RerankingModelTest.java +++ b/src/test/java/de/kherud/llama/RerankingModelTest.java @@ -1,5 +1,6 @@ package de.kherud.llama; +import java.util.List; import java.util.Map; import org.junit.AfterClass; @@ -10,6 +11,13 @@ public class RerankingModelTest { private static LlamaModel model; + + String query = "Machine learning is"; + String[] TEST_DOCUMENTS = new String[] { + "A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.", + "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.", + "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.", + "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine." }; @BeforeClass public static void setup() { @@ -28,12 +36,7 @@ public static void tearDown() { @Test public void testReRanking() { - String query = "Machine learning is"; - String[] TEST_DOCUMENTS = new String[] { - "A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.", - "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. 
The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.", - "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.", - "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine." }; + LlamaOutput llamaOutput = model.rerank(query, TEST_DOCUMENTS[0], TEST_DOCUMENTS[1], TEST_DOCUMENTS[2], TEST_DOCUMENTS[3]); @@ -64,5 +67,17 @@ public void testReRanking() { } - + + @Test + public void testSortedReRanking() { + List> rankedDocuments = model.rerank(true, query, TEST_DOCUMENTS); + Assert.assertEquals(rankedDocuments.size(), TEST_DOCUMENTS.length); + + // Check the ranking order: each score should be >= the next one + for (int i = 0; i < rankedDocuments.size() - 1; i++) { + float currentScore = rankedDocuments.get(i).getValue(); + float nextScore = rankedDocuments.get(i + 1).getValue(); + Assert.assertTrue("Ranking order incorrect at index " + i, currentScore >= nextScore); + } + } } From fe7c337a76f498f2fb7b7e1c501386554554235c Mon Sep 17 00:00:00 2001 From: Vaijanath Rao Date: Thu, 13 Mar 2025 16:57:46 -0700 Subject: [PATCH 14/29] updating release.yaml file for reranking --- .github/workflows/release.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index ff566ad..6403202 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -11,6 +11,8 @@ on: env: MODEL_URL: "https://huggingface.co/TheBloke/CodeLlama-7B-GGUF/resolve/main/codellama-7b.Q2_K.gguf" MODEL_NAME: "codellama-7b.Q2_K.gguf" + RERANKING_MODEL_URL: "https://huggingface.co/gpustack/jina-reranker-v1-tiny-en-GGUF/resolve/main/jina-reranker-v1-tiny-en-Q4_0.gguf" + RERANKING_MODEL_NAME: "jina-reranker-v1-tiny-en-Q4_0.gguf" jobs: # todo: doesn't work with the newest llama.cpp version @@ -144,8 +146,10 @@ jobs: with: name: Linux-x86_64-libraries path: ${{ github.workspace }}/src/main/resources/de/kherud/llama/ - - name: Download model + - name: Download text generation model run: curl -L ${MODEL_URL} --create-dirs -o models/${MODEL_NAME} + - name: Download reranking model + run: curl -L ${RERANKING_MODEL_URL} --create-dirs -o models/${RERANKING_MODEL_NAME} - uses: actions/setup-java@v4 with: distribution: 'zulu' From 3d28a989ee7741715d1c593ab3282363185a72e4 Mon Sep 17 00:00:00 2001 From: Vaijanath Rao Date: Fri, 14 Mar 2025 02:36:21 -0700 Subject: [PATCH 15/29] adding support for messages. 
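
The new `setMessages` and `applyTemplate` APIs added below can be exercised roughly like this. This is a sketch, not the definitive usage: it assumes a chat model whose GGUF metadata ships a chat template, and the model path is a placeholder:

```java
import java.util.ArrayList;
import java.util.List;

import de.kherud.llama.InferenceParameters;
import de.kherud.llama.LlamaModel;
import de.kherud.llama.ModelParameters;
import de.kherud.llama.Pair;

public class ChatTemplateExample {
    public static void main(String... args) {
        // Roles are restricted to "user" and "assistant"; the system prompt is passed separately.
        List<Pair<String, String>> messages = new ArrayList<>();
        messages.add(new Pair<>("user", "What is the best book?"));

        InferenceParameters params = new InferenceParameters("")
                .setMessages("You are a helpful assistant.", messages);

        try (LlamaModel model = new LlamaModel(new ModelParameters().setModel("models/model.gguf"))) {
            // Renders the messages with the model's chat template (e.g. ChatML) into a single prompt string
            String prompt = model.applyTemplate(params);
            System.out.println(prompt);
        }
    }
}
```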
--- pom.xml | 8 +++- src/main/cpp/jllama.cpp | 14 ++++++ src/main/cpp/jllama.h | 7 +++ .../de/kherud/llama/InferenceParameters.java | 45 ++++++++++++++++++- src/main/java/de/kherud/llama/LlamaModel.java | 5 +++ .../java/de/kherud/llama/LlamaModelTest.java | 16 +++++++ 6 files changed, 92 insertions(+), 3 deletions(-) diff --git a/pom.xml b/pom.xml index fba7eb4..f4e1e45 100644 --- a/pom.xml +++ b/pom.xml @@ -5,7 +5,7 @@ de.kherud llama - 4.0.1 + 4.0.0 jar ${project.groupId}:${project.artifactId} @@ -65,7 +65,11 @@ 24.1.0 compile - + + com.fasterxml.jackson.core + jackson-databind + 2.16.0 + diff --git a/src/main/cpp/jllama.cpp b/src/main/cpp/jllama.cpp index b0242c3..a0aca71 100644 --- a/src/main/cpp/jllama.cpp +++ b/src/main/cpp/jllama.cpp @@ -786,6 +786,20 @@ JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jo } +JNIEXPORT jstring JNICALL Java_de_kherud_llama_LlamaModel_applyTemplate(JNIEnv *env, jobject obj, jstring jparams){ + jlong server_handle = env->GetLongField(obj, f_model_pointer); + auto *ctx_server = reinterpret_cast(server_handle); // NOLINT(*-no-int-to-ptr) + + std::string c_params = parse_jstring(env, jparams); + json data = json::parse(c_params); + + json templateData = oaicompat_completion_params_parse(data, ctx_server->params_base.use_jinja, ctx_server->params_base.reasoning_format, ctx_server->chat_templates.get()); + std::string tok_str = templateData.at("prompt"); + jstring jtok_str = env->NewStringUTF(tok_str.c_str()); + + return jtok_str; +} + JNIEXPORT jintArray JNICALL Java_de_kherud_llama_LlamaModel_encode(JNIEnv *env, jobject obj, jstring jprompt) { jlong server_handle = env->GetLongField(obj, f_model_pointer); auto *ctx_server = reinterpret_cast(server_handle); // NOLINT(*-no-int-to-ptr) diff --git a/src/main/cpp/jllama.h b/src/main/cpp/jllama.h index 01e4d20..dc17fa8 100644 --- a/src/main/cpp/jllama.h +++ b/src/main/cpp/jllama.h @@ -91,6 +91,13 @@ JNIEXPORT jbyteArray JNICALL Java_de_kherud_llama_LlamaModel_jsonSchemaToGrammar */ JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *, jobject, jstring, jobjectArray); +/* + * Class: de_kherud_llama_LlamaModel + * Method: applyTemplate + * Signature: (Ljava/lang/String;)Ljava/lang/String;; + */ +JNIEXPORT jstring JNICALL Java_de_kherud_llama_LlamaModel_applyTemplate(JNIEnv *, jobject, jstring); + #ifdef __cplusplus } #endif diff --git a/src/main/java/de/kherud/llama/InferenceParameters.java b/src/main/java/de/kherud/llama/InferenceParameters.java index 0ac1b1d..e868be0 100644 --- a/src/main/java/de/kherud/llama/InferenceParameters.java +++ b/src/main/java/de/kherud/llama/InferenceParameters.java @@ -1,8 +1,13 @@ package de.kherud.llama; import java.util.Collection; +import java.util.List; import java.util.Map; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; + import de.kherud.llama.args.MiroStat; import de.kherud.llama.args.Sampler; @@ -12,6 +17,9 @@ * {@link LlamaModel#complete(InferenceParameters)}. 
*/ public final class InferenceParameters extends JsonParameters { + + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Reusable ObjectMapper + private static final String PARAM_PROMPT = "prompt"; private static final String PARAM_INPUT_PREFIX = "input_prefix"; @@ -47,6 +55,7 @@ public final class InferenceParameters extends JsonParameters { private static final String PARAM_STREAM = "stream"; private static final String PARAM_USE_CHAT_TEMPLATE = "use_chat_template"; private static final String PARAM_USE_JINJA = "use_jinja"; + private static final String PARAM_MESSAGES = "messages"; public InferenceParameters(String prompt) { // we always need a prompt @@ -493,7 +502,41 @@ public InferenceParameters setUseChatTemplate(boolean useChatTemplate) { return this; } - + /** + * Set the messages for chat-based inference. + * - Allows **only one** system message. + * - Allows **one or more** user/assistant messages. + */ + public InferenceParameters setMessages(String systemMessage, List> messages) { + ArrayNode messagesArray = OBJECT_MAPPER.createArrayNode(); + + // Add system message (if provided) + if (systemMessage != null && !systemMessage.isEmpty()) { + ObjectNode systemObj = OBJECT_MAPPER.createObjectNode(); + systemObj.put("role", "system"); + systemObj.put("content", systemMessage); + messagesArray.add(systemObj); + } + + // Add user/assistant messages + for (Pair message : messages) { + String role = message.getKey(); + String content = message.getValue(); + + if (!role.equals("user") && !role.equals("assistant")) { + throw new IllegalArgumentException("Invalid role: " + role + ". Role must be 'user' or 'assistant'."); + } + + ObjectNode messageObj = OBJECT_MAPPER.createObjectNode(); + messageObj.put("role", role); + messageObj.put("content", content); + messagesArray.add(messageObj); + } + + // Convert ArrayNode to a JSON string and store it in parameters + parameters.put(PARAM_MESSAGES, messagesArray.toString()); + return this; + } diff --git a/src/main/java/de/kherud/llama/LlamaModel.java b/src/main/java/de/kherud/llama/LlamaModel.java index 9ed86d0..eab3620 100644 --- a/src/main/java/de/kherud/llama/LlamaModel.java +++ b/src/main/java/de/kherud/llama/LlamaModel.java @@ -163,4 +163,9 @@ public List> rerank(boolean reRank, String query, String ... } public native LlamaOutput rerank(String query, String... documents); + + public String applyTemplate(InferenceParameters parameters) { + return applyTemplate(parameters.toString()); + } + public native String applyTemplate(String parametersJson); } diff --git a/src/test/java/de/kherud/llama/LlamaModelTest.java b/src/test/java/de/kherud/llama/LlamaModelTest.java index 6481f09..e3e69d8 100644 --- a/src/test/java/de/kherud/llama/LlamaModelTest.java +++ b/src/test/java/de/kherud/llama/LlamaModelTest.java @@ -316,4 +316,20 @@ public void testJsonSchemaToGrammar() { String actualGrammar = LlamaModel.jsonSchemaToGrammar(schema); Assert.assertEquals(expectedGrammar, actualGrammar); } + + @Test + public void testTemplate() { + + List> userMessages = new ArrayList<>(); + userMessages.add(new Pair<>("user", "What is the best book?")); + userMessages.add(new Pair<>("assistant", "It depends on your interests. 
Do you like fiction or non-fiction?")); + + InferenceParameters params = new InferenceParameters("A book recommendation system.") + .setMessages("Book", userMessages) + .setTemperature(0.95f) + .setStopStrings("\"\"\"") + .setNPredict(nPredict) + .setSeed(42); + Assert.assertEquals(model.applyTemplate(params), "<|im_start|>system\nBook<|im_end|>\n<|im_start|>user\nWhat is the best book?<|im_end|>\n<|im_start|>assistant\nIt depends on your interests. Do you like fiction or non-fiction?<|im_end|>\n<|im_start|>assistant\n"); + } } From 6e95f61d51afa629b8a998d34f3cc3c4eb623709 Mon Sep 17 00:00:00 2001 From: Konstantin Herud Date: Tue, 18 Mar 2025 21:01:25 +0100 Subject: [PATCH 16/29] reformat c++ code --- src/main/cpp/jllama.cpp | 159 ++++++++++++++++++++-------------------- 1 file changed, 79 insertions(+), 80 deletions(-) diff --git a/src/main/cpp/jllama.cpp b/src/main/cpp/jllama.cpp index a0aca71..b9436b7 100644 --- a/src/main/cpp/jllama.cpp +++ b/src/main/cpp/jllama.cpp @@ -112,13 +112,15 @@ char **parse_string_array(JNIEnv *env, const jobjectArray string_array, const js return result; } -std::vector parse_string_array_for_rerank(JNIEnv *env, const jobjectArray string_array, const jsize length) { +std::vector parse_string_array_for_rerank(JNIEnv *env, const jobjectArray string_array, + const jsize length) { std::vector result; result.reserve(length); // Reserve memory for efficiency for (jsize i = 0; i < length; i++) { jstring javaString = static_cast(env->GetObjectArrayElement(string_array, i)); - if (javaString == nullptr) continue; + if (javaString == nullptr) + continue; const char *cString = env->GetStringUTFChars(javaString, nullptr); if (cString != nullptr) { @@ -259,7 +261,6 @@ JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void *reserved) { cc_integer = env->GetMethodID(c_integer, "", "(I)V"); cc_float = env->GetMethodID(c_float, "", "(F)V"); - if (!(cc_output && cc_hash_map && cc_integer && cc_float)) { goto error; } @@ -663,12 +664,11 @@ JNIEXPORT jfloatArray JNICALL Java_de_kherud_llama_LlamaModel_embed(JNIEnv *env, env->ThrowNew(c_llama_error, response.c_str()); return nullptr; } - + if (result->is_stop()) { ctx_server->queue_results.remove_waiting_task_id(id_task); } - const auto out_res = result->to_json(); // Extract "embedding" as a vector of vectors (2D array) @@ -704,100 +704,99 @@ JNIEXPORT jfloatArray JNICALL Java_de_kherud_llama_LlamaModel_embed(JNIEnv *env, return j_embedding; } -JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jobject obj, jstring jprompt, jobjectArray documents) { +JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jobject obj, jstring jprompt, + jobjectArray documents) { jlong server_handle = env->GetLongField(obj, f_model_pointer); auto *ctx_server = reinterpret_cast(server_handle); // NOLINT(*-no-int-to-ptr) - if (!ctx_server->params_base.reranking || ctx_server->params_base.embedding) { - env->ThrowNew(c_llama_error, + if (!ctx_server->params_base.reranking || ctx_server->params_base.embedding) { + env->ThrowNew(c_llama_error, "This server does not support reranking. 
Start it with `--reranking` and without `--embedding`"); - return nullptr; + return nullptr; } - const std::string prompt = parse_jstring(env, jprompt); - - const auto tokenized_query = tokenize_mixed(ctx_server->vocab, prompt, true, true); - + json responses = json::array(); bool error = false; - - std::vector tasks; - const jsize argc = env->GetArrayLength(documents); - std::vector documentsArray = parse_string_array_for_rerank(env, documents, argc); - - std::vector tokenized_docs = tokenize_input_prompts(ctx_server->vocab, documentsArray, true, true); - - tasks.reserve(tokenized_docs.size()); - for (size_t i = 0; i < tokenized_docs.size(); i++) { - server_task task = server_task(SERVER_TASK_TYPE_RERANK); - task.id = ctx_server->queue_tasks.get_new_id(); - task.index = i; - task.prompt_tokens = format_rerank(ctx_server->vocab, tokenized_query, tokenized_docs[i]); - tasks.push_back(task); - } - ctx_server->queue_results.add_waiting_tasks(tasks); - ctx_server->queue_tasks.post(tasks); - - // get the result - std::unordered_set task_ids = server_task::get_list_id(tasks); - std::vector results(task_ids.size()); - - // Create a new HashMap instance - jobject o_probabilities = env->NewObject(c_hash_map, cc_hash_map); - if (o_probabilities == nullptr) { - env->ThrowNew(c_llama_error, "Failed to create HashMap object."); - return nullptr; - } - - for (int i = 0; i < (int)task_ids.size(); i++) { - server_task_result_ptr result = ctx_server->queue_results.recv(task_ids); - if (result->is_error()) { - std::string response = result->to_json()["message"].get(); - for (const int id_task : task_ids) { - ctx_server->queue_results.remove_waiting_task_id(id_task); - } - env->ThrowNew(c_llama_error, response.c_str()); - return nullptr; - } - - const auto out_res = result->to_json(); - - if (result->is_stop()) { - for (const int id_task : task_ids) { - ctx_server->queue_results.remove_waiting_task_id(id_task); - } - } - - int index = out_res["index"].get(); - float score = out_res["score"].get(); - std::string tok_str = documentsArray[index]; - jstring jtok_str = env->NewStringUTF(tok_str.c_str()); - - jobject jprob = env->NewObject(c_float, cc_float, score); - env->CallObjectMethod(o_probabilities, m_map_put, jtok_str, jprob); - env->DeleteLocalRef(jtok_str); - env->DeleteLocalRef(jprob); - } + + std::vector tasks; + const jsize argc = env->GetArrayLength(documents); + std::vector documentsArray = parse_string_array_for_rerank(env, documents, argc); + + std::vector tokenized_docs = tokenize_input_prompts(ctx_server->vocab, documentsArray, true, true); + + tasks.reserve(tokenized_docs.size()); + for (size_t i = 0; i < tokenized_docs.size(); i++) { + server_task task = server_task(SERVER_TASK_TYPE_RERANK); + task.id = ctx_server->queue_tasks.get_new_id(); + task.index = i; + task.prompt_tokens = format_rerank(ctx_server->vocab, tokenized_query, tokenized_docs[i]); + tasks.push_back(task); + } + ctx_server->queue_results.add_waiting_tasks(tasks); + ctx_server->queue_tasks.post(tasks); + + // get the result + std::unordered_set task_ids = server_task::get_list_id(tasks); + std::vector results(task_ids.size()); + + // Create a new HashMap instance + jobject o_probabilities = env->NewObject(c_hash_map, cc_hash_map); + if (o_probabilities == nullptr) { + env->ThrowNew(c_llama_error, "Failed to create HashMap object."); + return nullptr; + } + + for (int i = 0; i < (int)task_ids.size(); i++) { + server_task_result_ptr result = ctx_server->queue_results.recv(task_ids); + if (result->is_error()) { + std::string 
response = result->to_json()["message"].get(); + for (const int id_task : task_ids) { + ctx_server->queue_results.remove_waiting_task_id(id_task); + } + env->ThrowNew(c_llama_error, response.c_str()); + return nullptr; + } + + const auto out_res = result->to_json(); + + if (result->is_stop()) { + for (const int id_task : task_ids) { + ctx_server->queue_results.remove_waiting_task_id(id_task); + } + } + + int index = out_res["index"].get(); + float score = out_res["score"].get(); + std::string tok_str = documentsArray[index]; + jstring jtok_str = env->NewStringUTF(tok_str.c_str()); + + jobject jprob = env->NewObject(c_float, cc_float, score); + env->CallObjectMethod(o_probabilities, m_map_put, jtok_str, jprob); + env->DeleteLocalRef(jtok_str); + env->DeleteLocalRef(jprob); + } jbyteArray jbytes = parse_jbytes(env, prompt); - return env->NewObject(c_output, cc_output, jbytes, o_probabilities, true); - + return env->NewObject(c_output, cc_output, jbytes, o_probabilities, true); } -JNIEXPORT jstring JNICALL Java_de_kherud_llama_LlamaModel_applyTemplate(JNIEnv *env, jobject obj, jstring jparams){ - jlong server_handle = env->GetLongField(obj, f_model_pointer); +JNIEXPORT jstring JNICALL Java_de_kherud_llama_LlamaModel_applyTemplate(JNIEnv *env, jobject obj, jstring jparams) { + jlong server_handle = env->GetLongField(obj, f_model_pointer); auto *ctx_server = reinterpret_cast(server_handle); // NOLINT(*-no-int-to-ptr) std::string c_params = parse_jstring(env, jparams); json data = json::parse(c_params); - - json templateData = oaicompat_completion_params_parse(data, ctx_server->params_base.use_jinja, ctx_server->params_base.reasoning_format, ctx_server->chat_templates.get()); + + json templateData = + oaicompat_completion_params_parse(data, ctx_server->params_base.use_jinja, + ctx_server->params_base.reasoning_format, ctx_server->chat_templates.get()); std::string tok_str = templateData.at("prompt"); - jstring jtok_str = env->NewStringUTF(tok_str.c_str()); - - return jtok_str; + jstring jtok_str = env->NewStringUTF(tok_str.c_str()); + + return jtok_str; } JNIEXPORT jintArray JNICALL Java_de_kherud_llama_LlamaModel_encode(JNIEnv *env, jobject obj, jstring jprompt) { From 986bddf63bd294c37d903d14906bed25ba95d6e9 Mon Sep 17 00:00:00 2001 From: Konstantin Herud Date: Tue, 18 Mar 2025 21:25:18 +0100 Subject: [PATCH 17/29] re-use parse_string_array for re-ranking --- src/main/cpp/jllama.cpp | 39 +++++++++------------------------------ 1 file changed, 9 insertions(+), 30 deletions(-) diff --git a/src/main/cpp/jllama.cpp b/src/main/cpp/jllama.cpp index b9436b7..ac056b9 100644 --- a/src/main/cpp/jllama.cpp +++ b/src/main/cpp/jllama.cpp @@ -112,28 +112,6 @@ char **parse_string_array(JNIEnv *env, const jobjectArray string_array, const js return result; } -std::vector parse_string_array_for_rerank(JNIEnv *env, const jobjectArray string_array, - const jsize length) { - std::vector result; - result.reserve(length); // Reserve memory for efficiency - - for (jsize i = 0; i < length; i++) { - jstring javaString = static_cast(env->GetObjectArrayElement(string_array, i)); - if (javaString == nullptr) - continue; - - const char *cString = env->GetStringUTFChars(javaString, nullptr); - if (cString != nullptr) { - result.emplace_back(cString); // Add to vector - env->ReleaseStringUTFChars(javaString, cString); - } - - env->DeleteLocalRef(javaString); // Avoid memory leaks - } - - return result; -} - void free_string_array(char **array, jsize length) { if (array != nullptr) { for (jsize i = 0; i < length; i++) { @@ 
-720,17 +698,18 @@ JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jo const auto tokenized_query = tokenize_mixed(ctx_server->vocab, prompt, true, true); json responses = json::array(); - bool error = false; std::vector tasks; - const jsize argc = env->GetArrayLength(documents); - std::vector documentsArray = parse_string_array_for_rerank(env, documents, argc); + const jsize amount_documents = env->GetArrayLength(documents); + auto *document_array = parse_string_array(env, documents, amount_documents); + auto document_vector = std::vector(document_array, document_array + amount_documents); + free_string_array(document_array, amount_documents); - std::vector tokenized_docs = tokenize_input_prompts(ctx_server->vocab, documentsArray, true, true); + std::vector tokenized_docs = tokenize_input_prompts(ctx_server->vocab, document_vector, true, true); tasks.reserve(tokenized_docs.size()); - for (size_t i = 0; i < tokenized_docs.size(); i++) { - server_task task = server_task(SERVER_TASK_TYPE_RERANK); + for (int i = 0; i < tokenized_docs.size(); i++) { + auto task = server_task(SERVER_TASK_TYPE_RERANK); task.id = ctx_server->queue_tasks.get_new_id(); task.index = i; task.prompt_tokens = format_rerank(ctx_server->vocab, tokenized_query, tokenized_docs[i]); @@ -753,7 +732,7 @@ JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jo for (int i = 0; i < (int)task_ids.size(); i++) { server_task_result_ptr result = ctx_server->queue_results.recv(task_ids); if (result->is_error()) { - std::string response = result->to_json()["message"].get(); + auto response = result->to_json()["message"].get(); for (const int id_task : task_ids) { ctx_server->queue_results.remove_waiting_task_id(id_task); } @@ -771,7 +750,7 @@ JNIEXPORT jobject JNICALL Java_de_kherud_llama_LlamaModel_rerank(JNIEnv *env, jo int index = out_res["index"].get(); float score = out_res["score"].get(); - std::string tok_str = documentsArray[index]; + std::string tok_str = document_vector[index]; jstring jtok_str = env->NewStringUTF(tok_str.c_str()); jobject jprob = env->NewObject(c_float, cc_float, score); From 62cc40eff9e322815b2c750b95215b78597dc099 Mon Sep 17 00:00:00 2001 From: Konstantin Herud Date: Tue, 18 Mar 2025 21:25:39 +0100 Subject: [PATCH 18/29] replace jackson with string builder --- pom.xml | 5 -- .../de/kherud/llama/InferenceParameters.java | 55 ++++++++++--------- 2 files changed, 29 insertions(+), 31 deletions(-) diff --git a/pom.xml b/pom.xml index f4e1e45..4982f40 100644 --- a/pom.xml +++ b/pom.xml @@ -65,11 +65,6 @@ 24.1.0 compile - - com.fasterxml.jackson.core - jackson-databind - 2.16.0 - diff --git a/src/main/java/de/kherud/llama/InferenceParameters.java b/src/main/java/de/kherud/llama/InferenceParameters.java index e868be0..41f74cc 100644 --- a/src/main/java/de/kherud/llama/InferenceParameters.java +++ b/src/main/java/de/kherud/llama/InferenceParameters.java @@ -4,10 +4,6 @@ import java.util.List; import java.util.Map; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ArrayNode; -import com.fasterxml.jackson.databind.node.ObjectNode; - import de.kherud.llama.args.MiroStat; import de.kherud.llama.args.Sampler; @@ -16,10 +12,8 @@ * and * {@link LlamaModel#complete(InferenceParameters)}. 
From 62cc40eff9e322815b2c750b95215b78597dc099 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Tue, 18 Mar 2025 21:25:39 +0100
Subject: [PATCH 18/29] replace jackson with string builder

---
 pom.xml                                       |  5 --
 .../de/kherud/llama/InferenceParameters.java  | 55 ++++++++++---------
 2 files changed, 29 insertions(+), 31 deletions(-)

diff --git a/pom.xml b/pom.xml
index f4e1e45..4982f40 100644
--- a/pom.xml
+++ b/pom.xml
@@ -65,11 +65,6 @@
 			<version>24.1.0</version>
 			<scope>compile</scope>
 		</dependency>
-		<dependency>
-			<groupId>com.fasterxml.jackson.core</groupId>
-			<artifactId>jackson-databind</artifactId>
-			<version>2.16.0</version>
-		</dependency>
 	</dependencies>
diff --git a/src/main/java/de/kherud/llama/InferenceParameters.java b/src/main/java/de/kherud/llama/InferenceParameters.java
index e868be0..41f74cc 100644
--- a/src/main/java/de/kherud/llama/InferenceParameters.java
+++ b/src/main/java/de/kherud/llama/InferenceParameters.java
@@ -4,10 +4,6 @@
 import java.util.List;
 import java.util.Map;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ArrayNode;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-
 import de.kherud.llama.args.MiroStat;
 import de.kherud.llama.args.Sampler;
 
@@ -16,10 +12,8 @@
  * and
  * {@link LlamaModel#complete(InferenceParameters)}.
  */
+@SuppressWarnings("unused")
 public final class InferenceParameters extends JsonParameters {
-
-	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Reusable ObjectMapper
-
 	private static final String PARAM_PROMPT = "prompt";
 	private static final String PARAM_INPUT_PREFIX = "input_prefix";
@@ -489,13 +483,8 @@ public InferenceParameters setSamplers(Sampler... samplers) {
 		return this;
 	}
 
-	InferenceParameters setStream(boolean stream) {
-		parameters.put(PARAM_STREAM, String.valueOf(stream));
-		return this;
-	}
-
 	/**
-	 * Set whether or not generate should apply a chat template (default: false)
+	 * Set whether generate should apply a chat template (default: false)
 	 */
 	public InferenceParameters setUseChatTemplate(boolean useChatTemplate) {
 		parameters.put(PARAM_USE_JINJA, String.valueOf(useChatTemplate));
@@ -508,18 +497,22 @@ public InferenceParameters setUseChatTemplate(boolean useChatTemplate) {
 	 * - Allows **one or more** user/assistant messages.
 	 */
 	public InferenceParameters setMessages(String systemMessage, List<Pair<String, String>> messages) {
-		ArrayNode messagesArray = OBJECT_MAPPER.createArrayNode();
+		StringBuilder messagesBuilder = new StringBuilder();
+		messagesBuilder.append("[");
 
 		// Add system message (if provided)
 		if (systemMessage != null && !systemMessage.isEmpty()) {
-			ObjectNode systemObj = OBJECT_MAPPER.createObjectNode();
-			systemObj.put("role", "system");
-			systemObj.put("content", systemMessage);
-			messagesArray.add(systemObj);
+			messagesBuilder.append("{\"role\": \"system\", \"content\": ")
+					.append(toJsonString(systemMessage))
+					.append("}");
+			if (!messages.isEmpty()) {
+				messagesBuilder.append(", ");
+			}
 		}
 
 		// Add user/assistant messages
-		for (Pair<String, String> message : messages) {
+		for (int i = 0; i < messages.size(); i++) {
+			Pair<String, String> message = messages.get(i);
 			String role = message.getKey();
 			String content = message.getValue();
 
@@ -527,17 +520,27 @@ public InferenceParameters setMessages(String systemMessage, List<Pair<String, String>> messages) {
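(The body of the final hunk above was lost in extraction and is not reconstructed here.) The StringBuilder rewrite in patch 18 leans on a `toJsonString` helper inherited from `JsonParameters`. Its implementation is not part of this diff; a minimal sketch of what such an escaper has to do, as an assumption rather than the library's actual code:

```java
// Minimal sketch (assumed behavior of JsonParameters#toJsonString, not the actual implementation):
// wrap the text in quotes and escape everything JSON requires.
static String toJsonString(String text) {
    if (text == null) return "null";
    StringBuilder sb = new StringBuilder(text.length() + 2);
    sb.append('"');
    for (char c : text.toCharArray()) {
        switch (c) {
            case '"':  sb.append("\\\""); break;
            case '\\': sb.append("\\\\"); break;
            case '\b': sb.append("\\b");  break;
            case '\f': sb.append("\\f");  break;
            case '\n': sb.append("\\n");  break;
            case '\r': sb.append("\\r");  break;
            case '\t': sb.append("\\t");  break;
            default:
                if (c < 0x20) {
                    sb.append(String.format("\\u%04x", (int) c)); // other control characters
                } else {
                    sb.append(c);
                }
        }
    }
    sb.append('"');
    return sb.toString();
}
```

Dropping Jackson removes the library's only third-party runtime dependency; the trade-off is that all JSON emitted by the parameter builders must now be escaped by hand, as above.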
From: Konstantin Herud
Date: Tue, 18 Mar 2025 21:29:57 +0100
Subject: [PATCH 19/29] update readme code examples

---
 README.md | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 32f555e..1990aac 100644
--- a/README.md
+++ b/README.md
@@ -94,8 +94,8 @@ public class Example {
 
     public static void main(String... args) throws IOException {
         ModelParameters modelParams = new ModelParameters()
-                .setModelFilePath("/path/to/model.gguf")
-                .setNGpuLayers(43);
+                .setModel("models/mistral-7b-instruct-v0.2.Q2_K.gguf")
+                .setGpuLayers(43);
 
         String system = "This is a conversation between User and Llama, a friendly chatbot.\n" +
                 "Llama is helpful, kind, honest, good at writing, and never fails to answer any " +
@@ -114,8 +114,8 @@ public class Example {
             InferenceParameters inferParams = new InferenceParameters(prompt)
                     .setTemperature(0.7f)
                     .setPenalizeNl(true)
-                    .setMirostat(InferenceParameters.MiroStat.V2)
-                    .setAntiPrompt("\n");
+                    .setMiroStat(MiroStat.V2)
+                    .setStopStrings("User:");
             for (LlamaOutput output : model.generate(inferParams)) {
                 System.out.print(output);
                 prompt += output;
@@ -135,7 +135,7 @@ model to your prompt in order to extend the context. If there is repeated conten
 cache this, to improve performance.
 
 ```java
-ModelParameters modelParams = new ModelParameters().setModelFilePath("/path/to/model.gguf");
+ModelParameters modelParams = new ModelParameters().setModel("/path/to/model.gguf");
 InferenceParameters inferParams = new InferenceParameters("Tell me a joke.");
 try (LlamaModel model = new LlamaModel(modelParams)) {
     // Stream a response and access more information about each output.
@@ -167,9 +167,8 @@ for every inference task. All non-specified options have sensible defaults.
 
 ```java
 ModelParameters modelParams = new ModelParameters()
-        .setModelFilePath("/path/to/model.gguf")
-        .setLoraAdapter("/path/to/lora/adapter")
-        .setLoraBase("/path/to/lora/base");
+        .setModel("/path/to/model.gguf")
+        .addLoraAdapter("/path/to/lora/adapter");
 String grammar = """
     root  ::= (expr "=" term "\\n")+
     expr  ::= term ([-+*/] term)*

From 1ad2bf6840fb6a2033f9b9a717031d7ca0e26259 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Tue, 18 Mar 2025 21:32:14 +0100
Subject: [PATCH 20/29] update to latest llama.cpp version

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2278d45..8f402fa 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -25,7 +25,7 @@ set(LLAMA_BUILD_COMMON ON)
 FetchContent_Declare(
 	llama.cpp
 	GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
-	GIT_TAG        b4831
+	GIT_TAG        b4916
 )
 FetchContent_MakeAvailable(llama.cpp)

From 56d7d2d3c5b8e9ed27c5367f383d2b9faf3f9cd4 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Tue, 18 Mar 2025 21:44:17 +0100
Subject: [PATCH 21/29] update pom.xml version 4.0.0 -> 4.1.0

---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index 4982f40..3916a9e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
 
 	<groupId>de.kherud</groupId>
 	<artifactId>llama</artifactId>
-	<version>4.0.0</version>
+	<version>4.1.0</version>
 	<packaging>jar</packaging>
 
 	<name>${project.groupId}:${project.artifactId}</name>

From 481714559fd5c80bad3a51edfa4c5887c0b528b3 Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Tue, 18 Mar 2025 21:54:26 +0100
Subject: [PATCH 22/29] update readme versions

---
 README.md | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 1990aac..1bc278b 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
 ![Java 11+](https://img.shields.io/badge/Java-11%2B-informational)
-![llama.cpp b4831](https://img.shields.io/badge/llama.cpp-%23b4831-informational)
+![llama.cpp b4916](https://img.shields.io/badge/llama.cpp-%23b4916-informational)
 
 # Java Bindings for [llama.cpp](https://github.com/ggerganov/llama.cpp)
 
@@ -16,6 +16,9 @@ Inference of Meta's LLaMA model (and others) in pure C/C++.
    2.3 [Infilling](#infilling)
 3. [Android](#importing-in-android)
 
+> [!NOTE]
+> Now with support for Gemma 3
+
 ## Quick Start
 
 Access this library via Maven:
@@ -24,7 +27,7 @@ Access this library via Maven:
 <dependency>
 	<groupId>de.kherud</groupId>
 	<artifactId>llama</artifactId>
-	<version>4.0.0</version>
+	<version>4.1.0</version>
 </dependency>
 ```

From d34c1a1db7ba116277a82539c267cca146458264 Mon Sep 17 00:00:00 2001
From: Pierre
Date: Mon, 28 Apr 2025 15:26:08 +0200
Subject: [PATCH 23/29] Fix the enums PoolingType and RopeScalingType and
 their calls in ModelParameters

---
 .../java/de/kherud/llama/ModelParameters.java |  6 +++--
 .../de/kherud/llama/args/PoolingType.java     | 24 +++++++++----------
 .../de/kherud/llama/args/RopeScalingType.java | 24 +++++++++----------
 3 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/src/main/java/de/kherud/llama/ModelParameters.java b/src/main/java/de/kherud/llama/ModelParameters.java
index e4947d4..7999295 100644
--- a/src/main/java/de/kherud/llama/ModelParameters.java
+++ b/src/main/java/de/kherud/llama/ModelParameters.java
@@ -459,7 +459,7 @@ public ModelParameters setJsonSchema(String schema) {
 	 * Set pooling type for embeddings (default: model default if unspecified).
 	 */
 	public ModelParameters setPoolingType(PoolingType type) {
-		parameters.put("--pooling", String.valueOf(type.getId()));
+		parameters.put("--pooling", type.getArgValue());
 		return this;
 	}
 
@@ -467,7 +467,7 @@ public ModelParameters setPoolingType(PoolingType type) {
 	 * Set RoPE frequency scaling method (default: linear unless specified by the model).
 	 */
 	public ModelParameters setRopeScaling(RopeScalingType type) {
-		parameters.put("--rope-scaling", String.valueOf(type.getId()));
+		parameters.put("--rope-scaling", type.getArgValue());
 		return this;
 	}
 
@@ -960,3 +960,5 @@ public ModelParameters enableJinja() {
 	}
 
 }
+
+
diff --git a/src/main/java/de/kherud/llama/args/PoolingType.java b/src/main/java/de/kherud/llama/args/PoolingType.java
index a9c9dba..c0379c8 100644
--- a/src/main/java/de/kherud/llama/args/PoolingType.java
+++ b/src/main/java/de/kherud/llama/args/PoolingType.java
@@ -2,20 +2,20 @@
 
 public enum PoolingType {
 
-    UNSPECIFIED(-1),
-    NONE(0),
-    MEAN(1),
-    CLS(2),
-    LAST(3),
-    RANK(4);
+    UNSPECIFIED("unspecified"),
+    NONE("none"),
+    MEAN("mean"),
+    CLS("cls"),
+    LAST("last"),
+    RANK("rank");
 
-    private final int id;
+    private final String argValue;
 
-    PoolingType(int value) {
-        this.id = value;
+    PoolingType(String value) {
+        this.argValue = value;
     }
 
-    public int getId() {
-        return id;
+    public String getArgValue() {
+        return argValue;
     }
-}
+}
\ No newline at end of file
diff --git a/src/main/java/de/kherud/llama/args/RopeScalingType.java b/src/main/java/de/kherud/llama/args/RopeScalingType.java
index eed939a..138d05b 100644
--- a/src/main/java/de/kherud/llama/args/RopeScalingType.java
+++ b/src/main/java/de/kherud/llama/args/RopeScalingType.java
@@ -2,20 +2,20 @@
 
 public enum RopeScalingType {
 
-    UNSPECIFIED(-1),
-    NONE(0),
-    LINEAR(1),
-    YARN2(2),
-    LONGROPE(3),
-    MAX_VALUE(3);
+    UNSPECIFIED("unspecified"),
+    NONE("none"),
+    LINEAR("linear"),
+    YARN2("yarn"),
+    LONGROPE("longrope"),
+    MAX_VALUE("maxvalue");
 
-    private final int id;
+    private final String argValue;
 
-    RopeScalingType(int value) {
-        this.id = value;
+    RopeScalingType(String value) {
+        this.argValue = value;
     }
 
-    public int getId() {
-        return id;
+    public String getArgValue() {
+        return argValue;
     }
-}
+}
\ No newline at end of file
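With patch 23 applied, the enums serialize to the string flags that llama.cpp's CLI expects instead of numeric ids. A sketch of the resulting call site, using only setter names taken from the ModelParameters diff above (the model path is a placeholder):

```java
// The values in parentheses in the enum diffs above become literal CLI arguments,
// e.g. "--pooling mean" and "--rope-scaling linear".
ModelParameters params = new ModelParameters()
        .setModel("/path/to/model.gguf")         // placeholder path
        .setPoolingType(PoolingType.MEAN)        // now emits "mean" instead of "1"
        .setRopeScaling(RopeScalingType.LINEAR); // now emits "linear" instead of "1"
```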
From b17e212d0a71c100ff9925b1bcf09d44093a7b57 Mon Sep 17 00:00:00 2001
From: prabhdatnoor <--get>
Date: Wed, 7 May 2025 22:40:12 -0400
Subject: [PATCH 24/29] change os name to darwin

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8f402fa..b95d4ea 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -69,7 +69,7 @@ endif()
 
 # include jni.h and jni_md.h
 if(NOT DEFINED JNI_INCLUDE_DIRS)
-    if(OS_NAME MATCHES "^Linux" OR OS_NAME STREQUAL "Mac")
+    if(OS_NAME MATCHES "^Linux" OR OS_NAME STREQUAL "Darwin")
         set(JNI_INCLUDE_DIRS .github/include/unix)
     elseif(OS_NAME STREQUAL "Windows")
         set(JNI_INCLUDE_DIRS .github/include/windows)

From a850c2ba1c3bbdedb0ec0c556615bab87e5b0f7a Mon Sep 17 00:00:00 2001
From: prabhdatnoor <--get>
Date: Sat, 10 May 2025 17:12:57 -0400
Subject: [PATCH 25/29] also add Mac for arm mac support

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b95d4ea..96c6295 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -69,7 +69,7 @@ endif()
 
 # include jni.h and jni_md.h
 if(NOT DEFINED JNI_INCLUDE_DIRS)
-    if(OS_NAME MATCHES "^Linux" OR OS_NAME STREQUAL "Darwin")
+    if(OS_NAME MATCHES "^Linux" OR OS_NAME STREQUAL "Mac" OR OS_NAME STREQUAL "Darwin")
         set(JNI_INCLUDE_DIRS .github/include/unix)
     elseif(OS_NAME STREQUAL "Windows")
         set(JNI_INCLUDE_DIRS .github/include/windows)

From 31b08480f36dec728de8cb5d10f11bb158a2c1cd Mon Sep 17 00:00:00 2001
From: Holger Voormann
Date: Mon, 19 May 2025 20:13:13 +0200
Subject: [PATCH 26/29] OSInfo: Update link to Java bug #8005545

In a comment, update the link to Java bug #8005545, as the current one
leads to a webpage saying: "This bug is not available."

---
 src/main/java/de/kherud/llama/OSInfo.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/main/java/de/kherud/llama/OSInfo.java b/src/main/java/de/kherud/llama/OSInfo.java
index 772aeae..9354ec2 100644
--- a/src/main/java/de/kherud/llama/OSInfo.java
+++ b/src/main/java/de/kherud/llama/OSInfo.java
@@ -200,7 +200,7 @@ else if (armType.startsWith("aarch64")) {
 			}
 
 			// Java 1.8 introduces a system property to determine armel or armhf
-			// http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8005545
+			// https://bugs.openjdk.org/browse/JDK-8005545
 			String abi = System.getProperty("sun.arch.abi");
 			if (abi != null && abi.startsWith("gnueabihf")) {
 				return "armv7";
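The comment fixed in patch 26 sits in OSInfo's ARM detection. Boiled down to the hunk's context lines, the logic looks roughly like this (a simplified sketch of the excerpt above, not the full OSInfo class; the plain "arm" fallback is an assumption here):

```java
// JDK-8005545 (Java 8) introduced the sun.arch.abi system property, which separates
// soft-float (gnueabi) from hard-float (gnueabihf) ARM runtimes; OSInfo uses it to
// decide which native library variant to load.
static String armVariant() {
    String abi = System.getProperty("sun.arch.abi");
    return (abi != null && abi.startsWith("gnueabihf")) ? "armv7" : "arm";
}
```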
From 711990c1544a2a923453721073a6ae6ed1bd2a65 Mon Sep 17 00:00:00 2001
From: Arne Juul
Date: Fri, 20 Jun 2025 18:23:34 +0000
Subject: [PATCH 27/29] remove unused code

---
 src/main/cpp/server.hpp | 148 ----------------------------------------
 1 file changed, 148 deletions(-)

diff --git a/src/main/cpp/server.hpp b/src/main/cpp/server.hpp
index 66169a8..9686f2a 100644
--- a/src/main/cpp/server.hpp
+++ b/src/main/cpp/server.hpp
@@ -3269,151 +3269,3 @@ struct server_context {
         };
     }
 };
-
-static void common_params_handle_model_default(std::string &model, const std::string &model_url, std::string &hf_repo,
-                                               std::string &hf_file, const std::string &hf_token) {
-    if (!hf_repo.empty()) {
-        // short-hand to avoid specifying --hf-file -> default it to --model
-        if (hf_file.empty()) {
-            if (model.empty()) {
-                auto auto_detected = common_get_hf_file(hf_repo, hf_token);
-                if (auto_detected.first.empty() || auto_detected.second.empty()) {
-                    exit(1); // built without CURL, error message already printed
-                }
-                hf_repo = auto_detected.first;
-                hf_file = auto_detected.second;
-            } else {
-                hf_file = model;
-            }
-        }
-        // make sure model path is present (for caching purposes)
-        if (model.empty()) {
-            // this is to avoid different repo having same file name, or same file name in different subdirs
-            std::string filename = hf_repo + "_" + hf_file;
-            // to make sure we don't have any slashes in the filename
-            string_replace_all(filename, "/", "_");
-            model = fs_get_cache_file(filename);
-        }
-    } else if (!model_url.empty()) {
-        if (model.empty()) {
-            auto f = string_split<std::string>(model_url, '#').front();
-            f = string_split<std::string>(f, '?').front();
-            model = fs_get_cache_file(string_split<std::string>(f, '/').back());
-        }
-    } else if (model.empty()) {
-        model = DEFAULT_MODEL_PATH;
-    }
-}
-
-// parse the given jparams (see de.kherud.llama.args.ModelParameters#toString()) from JSON to the required C++ struct.
-static void server_params_parse(json jparams, common_params &params) {
-    common_params default_params;
-
-    params.sampling.seed = json_value(jparams, "seed", default_params.sampling.seed);
-    params.cpuparams.n_threads = json_value(jparams, "n_threads", default_params.cpuparams.n_threads);
-    params.speculative.cpuparams.n_threads =
-        json_value(jparams, "n_threads_draft", default_params.speculative.cpuparams.n_threads);
-    params.cpuparams_batch.n_threads = json_value(jparams, "n_threads_batch", default_params.cpuparams_batch.n_threads);
-    params.speculative.cpuparams_batch.n_threads =
-        json_value(jparams, "n_threads_batch_draft", default_params.speculative.cpuparams_batch.n_threads);
-    params.n_predict = json_value(jparams, "n_predict", default_params.n_predict);
-    params.n_ctx = json_value(jparams, "n_ctx", default_params.n_ctx);
-    params.n_batch = json_value(jparams, "n_batch", default_params.n_batch);
-    params.n_ubatch = json_value(jparams, "n_ubatch", default_params.n_ubatch);
-    params.n_keep = json_value(jparams, "n_keep", default_params.n_keep);
-
-    params.speculative.n_max = json_value(jparams, "n_draft", default_params.speculative.n_max);
-    params.speculative.n_min = json_value(jparams, "n_draft_min", default_params.speculative.n_min);
-
-    params.n_chunks = json_value(jparams, "n_chunks", default_params.n_chunks);
-    params.n_parallel = json_value(jparams, "n_parallel", default_params.n_parallel);
-    params.n_sequences = json_value(jparams, "n_sequences", default_params.n_sequences);
-    params.speculative.p_split = json_value(jparams, "p_split", default_params.speculative.p_split);
-    params.grp_attn_n = json_value(jparams, "grp_attn_n", default_params.grp_attn_n);
-    params.grp_attn_w = json_value(jparams, "grp_attn_w", default_params.grp_attn_w);
-    params.n_print = json_value(jparams, "n_print", default_params.n_print);
-    params.rope_freq_base = json_value(jparams, "rope_freq_base", default_params.rope_freq_base);
-    params.rope_freq_scale = json_value(jparams, "rope_freq_scale", default_params.rope_freq_scale);
-    params.yarn_ext_factor = json_value(jparams, "yarn_ext_factor", default_params.yarn_ext_factor);
-    params.yarn_attn_factor = json_value(jparams, "yarn_attn_factor", default_params.yarn_attn_factor);
-    params.yarn_beta_fast = json_value(jparams, "yarn_beta_fast", default_params.yarn_beta_fast);
-    params.yarn_beta_slow = json_value(jparams, "yarn_beta_slow", default_params.yarn_beta_slow);
-    params.yarn_orig_ctx = json_value(jparams, "yarn_orig_ctx", default_params.yarn_orig_ctx);
-    params.defrag_thold = json_value(jparams, "defrag_thold", default_params.defrag_thold);
-    params.numa = json_value(jparams, "numa", default_params.numa);
-    params.rope_scaling_type = json_value(jparams, "rope_scaling_type", default_params.rope_scaling_type);
-    params.pooling_type = json_value(jparams, "pooling_type", default_params.pooling_type);
-    params.model = json_value(jparams, "model", default_params.model);
-    params.speculative.model = json_value(jparams, "model_draft", default_params.speculative.model);
-    params.model_alias = json_value(jparams, "model_alias", default_params.model_alias);
-    params.model_url = json_value(jparams, "model_url", default_params.model_url);
-    params.hf_repo = json_value(jparams, "hf_repo", default_params.hf_repo);
-    params.hf_file = json_value(jparams, "hf_file", default_params.hf_file);
-    params.prompt = json_value(jparams, "prompt", default_params.prompt);
-    params.prompt_file = json_value(jparams, "prompt_file", default_params.prompt_file);
-    params.path_prompt_cache = json_value(jparams, "path_prompt_cache", default_params.path_prompt_cache);
-    params.input_prefix = json_value(jparams, "input_prefix", default_params.input_prefix);
-    params.input_suffix = json_value(jparams, "input_suffix", default_params.input_suffix);
-    params.antiprompt = json_value(jparams, "antiprompt", default_params.antiprompt);
-    params.lookup_cache_static = json_value(jparams, "lookup_cache_static", default_params.lookup_cache_static);
-    params.lookup_cache_dynamic = json_value(jparams, "lookup_cache_dynamic", default_params.lookup_cache_dynamic);
-    params.logits_file = json_value(jparams, "logits_file", default_params.logits_file);
-    // params.lora_adapters = json_value(jparams, "lora_adapter", default_params.lora_adapters);
-    params.embedding = json_value(jparams, "embedding", default_params.embedding);
-    params.escape = json_value(jparams, "escape", default_params.escape);
-    params.cont_batching = json_value(jparams, "cont_batching", default_params.cont_batching);
-    params.flash_attn = json_value(jparams, "flash_attn", default_params.flash_attn);
-    params.input_prefix_bos = json_value(jparams, "input_prefix_bos", default_params.input_prefix_bos);
-    params.sampling.ignore_eos = json_value(jparams, "ignore_eos", default_params.sampling.ignore_eos);
-    params.use_mmap = json_value(jparams, "use_mmap", default_params.use_mmap);
-    params.use_mlock = json_value(jparams, "use_mlock", default_params.use_mlock);
-    params.no_kv_offload = json_value(jparams, "no_kv_offload", default_params.no_kv_offload);
-    params.chat_template = json_value(jparams, "chat_template", default_params.chat_template);
-
-    if (jparams.contains("n_gpu_layers")) {
-        if (llama_supports_gpu_offload()) {
-            params.n_gpu_layers = json_value(jparams, "n_gpu_layers", default_params.n_gpu_layers);
-            params.speculative.n_gpu_layers =
-                json_value(jparams, "n_gpu_layers_draft", default_params.speculative.n_gpu_layers);
-        } else {
-            SRV_WRN("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
-                    "See main README.md for information on enabling GPU BLAS support: %s = %d",
-                    "n_gpu_layers", params.n_gpu_layers);
-        }
-    }
-
-    if (jparams.contains("split_mode")) {
-        params.split_mode = json_value(jparams, "split_mode", default_params.split_mode);
-// todo: the definition checks here currently don't work due to cmake visibility reasons
-#ifndef GGML_USE_CUDA
-        fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
-#endif
-    }
-
-    if (jparams.contains("tensor_split")) {
-#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
-        std::vector<float> tensor_split = jparams["tensor_split"].get<std::vector<float>>();
-        GGML_ASSERT(tensor_split.size() <= llama_max_devices());
-
-        for (size_t i_device = 0; i_device < llama_max_devices(); ++i_device) {
-            if (i_device < tensor_split.size()) {
-                params.tensor_split[i_device] = tensor_split.at(i_device);
-            } else {
-                params.tensor_split[i_device] = 0.0f;
-            }
-        }
-#else
-        SRV_WRN("%s", "llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n");
-#endif // GGML_USE_CUDA
-    }
-
-    if (jparams.contains("main_gpu")) {
-#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
-        params.main_gpu = json_value(jparams, "main_gpu", default_params.main_gpu);
-#else
-        SRV_WRN("%s", "llama.cpp was compiled without CUDA. It is not possible to set a main GPU.");
-#endif
-    }
-
-    common_params_handle_model_default(params.model, params.model_url, params.hf_repo, params.hf_file, params.hf_token);
-}

From 1aa872a2329d8efe4b85cb5ed80f9ab7b7df754a Mon Sep 17 00:00:00 2001
From: Arne Juul
Date: Fri, 20 Jun 2025 18:24:35 +0000
Subject: [PATCH 28/29] remove duplicated code

common_chat_templates_init is already done at end of load_model in server.hpp

---
 src/main/cpp/jllama.cpp | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/src/main/cpp/jllama.cpp b/src/main/cpp/jllama.cpp
index ac056b9..11c80ae 100644
--- a/src/main/cpp/jllama.cpp
+++ b/src/main/cpp/jllama.cpp
@@ -452,16 +452,6 @@ JNIEXPORT void JNICALL Java_de_kherud_llama_LlamaModel_loadModel(JNIEnv *env, jo
         llama_init_dft.context.reset();
     }
 
-    ctx_server->chat_templates = common_chat_templates_init(ctx_server->model, params.chat_template);
-    try {
-        common_chat_format_example(ctx_server->chat_templates.get(), params.use_jinja);
-    } catch (const std::exception &e) {
-        SRV_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This "
-                "may cause the model to output suboptimal responses\n",
-                __func__);
-        ctx_server->chat_templates = common_chat_templates_init(ctx_server->model, "chatml");
-    }
-
     // print sample chat example to make it clear which template is used
     LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
             common_chat_templates_source(ctx_server->chat_templates.get()),
@@ -860,4 +850,4 @@ JNIEXPORT jbyteArray JNICALL Java_de_kherud_llama_LlamaModel_jsonSchemaToGrammar
     nlohmann::ordered_json c_schema_json = nlohmann::ordered_json::parse(c_schema);
     const std::string c_grammar = json_schema_to_grammar(c_schema_json);
     return parse_jbytes(env, c_grammar);
-}
\ No newline at end of file
+}

From 49be66475700487e9ae9be5ba1d22b5855bb0d1c Mon Sep 17 00:00:00 2001
From: Konstantin Herud
Date: Fri, 20 Jun 2025 21:25:39 +0200
Subject: [PATCH 29/29] bump pom.xml version 4.1.0 -> 4.2.0

---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index 3916a9e..67b366e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
 
 	<groupId>de.kherud</groupId>
 	<artifactId>llama</artifactId>
-	<version>4.1.0</version>
+	<version>4.2.0</version>
 	<packaging>jar</packaging>
 
 	<name>${project.groupId}:${project.artifactId}</name>
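Taken together, the series leaves the bindings on llama.cpp b4916 at version 4.2.0. An end-to-end sketch against the 4.x API, using only calls that appear in the README diffs above (model path and prompt are placeholders):

```java
// Sketch of the 4.x API surface after this patch series; model file is a placeholder.
ModelParameters modelParams = new ModelParameters()
        .setModel("models/mistral-7b-instruct-v0.2.Q2_K.gguf")
        .setGpuLayers(43);
String prompt = "User: Tell me a joke.\nLlama: ";
try (LlamaModel model = new LlamaModel(modelParams)) {
    InferenceParameters inferParams = new InferenceParameters(prompt)
            .setTemperature(0.7f)
            .setMiroStat(MiroStat.V2)
            .setStopStrings("User:"); // stop when the model starts a new user turn
    for (LlamaOutput output : model.generate(inferParams)) {
        System.out.print(output);
    }
}
```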