Skip to content

Commit

Permalink
Fix passing -m to r2ai via r2ai-server
Browse files Browse the repository at this point in the history
  • Loading branch information
radare committed Sep 16, 2024
1 parent c2b842b commit f12dd71
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 5 deletions.
9 changes: 5 additions & 4 deletions r2ai-server/r2ai-server
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,11 @@ MODEL=""
# Default configuration for the model server launcher.
PORT=8080                          # HTTP port the selected server listens on
MODELDIR="${HOME}/.r2ai.models"    # directory where models are stored
LLAMA=llamafile                    # backend to launch (other cases visible: koboldcpp, r2ai)
R2AI="${R2AI:-r2ai}"               # r2ai executable; overridable via the R2AI env var

main() {
if [ -z "${MODEL}" ]; then
echo "Use -l and -m to select a model"
echo "Use -l and -m to select a model" >&2
exit 1
fi
echo "${MODEL}" | grep -e ^/
Expand All @@ -27,7 +28,7 @@ main() {
koboldcpp -c 0 --port ${PORT} -m "${MODELPATH}"
;;
r2ai)
r2ai -c="-e http.port=${PORT}" -c="-m ${MODEL}" -c="-w"
${R2AI} -c="-e http.port=${PORT}" -m "${MODEL}" -w
;;
*)
echo "Invalid llama server selected."
Expand Down Expand Up @@ -75,11 +76,11 @@ while : ; do
models
break
fi
MODEL=$2
MODEL="$2"
shift
;;
-p)
PORT=$2
PORT="$2"
shift
;;
-h)
Expand Down
2 changes: 1 addition & 1 deletion r2ai.sh
Original file line number Diff line number Diff line change
Expand Up @@ -17,4 +17,4 @@ else
. venv/bin/activate
fi
# export PYTHONPATH=$PWD
$PYTHON $D/main.py $@
exec $PYTHON $D/main.py "$@"

0 comments on commit f12dd71

Please sign in to comment.