diff --git a/llama_bringup/package.xml b/llama_bringup/package.xml
index b09f847d..d6f7ebb6 100644
--- a/llama_bringup/package.xml
+++ b/llama_bringup/package.xml
@@ -2,7 +2,7 @@
   <name>llama_bringup</name>
-  <version>4.1.7</version>
+  <version>4.1.8</version>
   <description>Bringup package for llama_ros</description>
   <maintainer>Miguel Ángel González Santamarta</maintainer>
   <license>MIT</license>
diff --git a/llama_cli/package.xml b/llama_cli/package.xml
index c2439cb9..40d07ffa 100644
--- a/llama_cli/package.xml
+++ b/llama_cli/package.xml
@@ -2,7 +2,7 @@
   <name>llama_cli</name>
-  <version>4.1.7</version>
+  <version>4.1.8</version>
   <description>Cli package for llama_ros</description>
   <maintainer>Miguel Ángel González Santamarta</maintainer>
   <license>MIT</license>
diff --git a/llama_cli/setup.py b/llama_cli/setup.py
index bb6b52f5..9a0d28bf 100644
--- a/llama_cli/setup.py
+++ b/llama_cli/setup.py
@@ -3,7 +3,7 @@

 setup(
     name="llama_cli",
-    version="4.1.7",
+    version="4.1.8",
     packages=find_packages(exclude=["test"]),
     zip_safe=True,
     author="Miguel Ángel González Santamarta",
diff --git a/llama_cpp_vendor/package.xml b/llama_cpp_vendor/package.xml
index 450a314e..62088cda 100644
--- a/llama_cpp_vendor/package.xml
+++ b/llama_cpp_vendor/package.xml
@@ -2,7 +2,7 @@
   <name>llama_cpp_vendor</name>
-  <version>4.1.7</version>
+  <version>4.1.8</version>
   <description>Vendor package for llama.cpp.</description>
   <maintainer>Miguel Ángel González Santamarta</maintainer>
   <license>MIT</license>
diff --git a/llama_demos/package.xml b/llama_demos/package.xml
index 45d2b374..6c243df3 100644
--- a/llama_demos/package.xml
+++ b/llama_demos/package.xml
@@ -2,7 +2,7 @@
   <name>llama_demos</name>
-  <version>4.1.7</version>
+  <version>4.1.8</version>
   <description>Demos for llama_ros</description>
   <maintainer>Miguel Ángel González Santamarta</maintainer>
   <license>MIT</license>
diff --git a/llama_msgs/package.xml b/llama_msgs/package.xml
index 0e43c2c4..abf13501 100644
--- a/llama_msgs/package.xml
+++ b/llama_msgs/package.xml
@@ -2,7 +2,7 @@
   <name>llama_msgs</name>
-  <version>4.1.7</version>
+  <version>4.1.8</version>
   <description>Msgs for llama_ros</description>
   <maintainer>Miguel Ángel González Santamarta</maintainer>
   <license>MIT</license>
diff --git a/llama_ros/package.xml b/llama_ros/package.xml
index fed6aaa4..ac82a0eb 100644
--- a/llama_ros/package.xml
+++ b/llama_ros/package.xml
@@ -2,7 +2,7 @@
   <name>llama_ros</name>
-  <version>4.1.7</version>
+  <version>4.1.8</version>
   <description>llama.cpp for ROS 2</description>
   <maintainer>Miguel Ángel González Santamarta</maintainer>
   <license>MIT</license>
diff --git a/llama_ros/src/llama_ros/llama.cpp b/llama_ros/src/llama_ros/llama.cpp
index 6b8f0ba2..e68d261f 100644
--- a/llama_ros/src/llama_ros/llama.cpp
+++ b/llama_ros/src/llama_ros/llama.cpp
@@ -817,13 +817,13 @@ Llama::find_stop(std::vector<struct CompletionOutput> completion_result_list,

   // respect the maximum number of tokens
   if (this->n_past > this->params.n_predict && this->params.n_predict != -1) {
-    LLAMA_LOG_INFO("Maximun number of tokens reached %d",
+    LLAMA_LOG_INFO("Maximum number of tokens reached %d",
                    this->params.n_predict);
     return FULL_STOP;
   }

   if (this->n_past > this->get_n_ctx() && this->params.n_predict == -2) {
-    LLAMA_LOG_INFO("Maximun number of tokens reached %d", this->get_n_ctx());
+    LLAMA_LOG_INFO("Maximum number of tokens reached %d", this->get_n_ctx());
     return FULL_STOP;
   }
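
Note on the llama.cpp hunk: the two checks it touches follow llama.cpp's usual n_predict sentinels (-1 disables the token limit; -2 generates until the context window is full). Below is a minimal standalone sketch of that logic; the function name check_token_limit and the plain int parameters are illustrative assumptions, not the llama_ros API, which keeps this state on the Llama class as shown in the diff.

// token_limit_sketch.cpp -- illustrative only; mirrors the two checks in
// Llama::find_stop above, assuming llama.cpp's n_predict conventions:
//   n_predict == -1 -> no token limit
//   n_predict == -2 -> generate until the context window is full
#include <cstdio>

enum StopType { NO_STOP, FULL_STOP };

StopType check_token_limit(int n_past, int n_predict, int n_ctx) {
  // Respect the maximum number of tokens (skipped when n_predict == -1).
  if (n_past > n_predict && n_predict != -1) {
    std::printf("Maximum number of tokens reached %d\n", n_predict);
    return FULL_STOP;
  }

  // With n_predict == -2, stop once the context window is exhausted.
  if (n_past > n_ctx && n_predict == -2) {
    std::printf("Maximum number of tokens reached %d\n", n_ctx);
    return FULL_STOP;
  }

  return NO_STOP;
}

int main() {
  check_token_limit(129, 128, 4096); // past the 128-token cap -> FULL_STOP
  check_token_limit(129, -1, 4096);  // unlimited -> NO_STOP
  return 0;
}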