# shellcheck shell=dash disable=SC2034

xrc cfgy
# Entry point for `llmf cfg`.
# With no arguments, run the first-time setup wizard; with arguments,
# either print the module help (-h/--help) or forward everything to the
# cfgy-backed invoker.
___x_cmd_llmf_cfg(){
    if [ "$#" -eq 0 ]; then
        ___x_cmd_llmf_init
        return
    fi

    local subcmd
    subcmd="$1"
    shift
    case "$subcmd" in
        -h|--help)
            ___x_cmd help -m llmf --cfg
            ;;
        *)
            ___x_cmd_llmf_cfg___invoke "$subcmd" "$@"
            ;;
    esac
}

# `llmf cur`: operate on the *current* (local) config profile.
# X_help_cmd is declared local first so that the temporary environment
# assignment in front of help:arg:parse stays scoped to this function
# (in dash, assignments preceding a function call persist in the shell).
___x_cmd_llmf_cur(){
    local X_help_cmd=
    X_help_cmd='___x_cmd help -m llmf --cur' help:arg:parse
    ___x_cmd_llmf_cfg --current "$@"
}

# First-time setup wizard for llmf: interactively collect model/endpoint,
# seeding defaults from the existing current config when one is present.
___x_cmd_llmf_init(){
    local cur_apikey=; local cur_model=; local cur_endpoint=

    # Only try to load existing values if the current config file exists.
    # `--get config` presumably prints the config file path — TODO confirm
    # against the cfgy module.
    if [ -f "$(___x_cmd_llmf_cur --get config)" ]; then
        # cfgy assignment DSL: `var:=key` loads config key into the local
        # shell variable. Errors are deliberately silenced (best-effort read).
        ___x_cmd_llmf_cur cur_model:=model cur_endpoint:=endpoint 2>/dev/null
    fi

    # Prompt for each var. --ctrl_exit_strategy is passed only when a model
    # was already configured (NOTE(review): presumably changes wizard exit
    # behavior on re-runs — confirm in cfgy docs). The bare entries after '='
    # look like suggested model choices for the picker.
    ___x_cmd_llmf_cfg___invoke --init ${cur_model:+"--ctrl_exit_strategy"} \
        model       "Set the model of llmf ai"                            \
                    "${cur_model:-"$___X_CMD_LLMF_DEFAULT_FIRST_MODEL"}"   '=' "llava/v1.5-7b/q4_k.gguf" "tinyllama/1.1b-chat-v1.0/q4_k_m.gguf" --    \
        endpoint    "Set up the endpoint (optional)"                        \
        "${cur_endpoint:-"$___X_CMD_LLMF_DEFAULT_ENDPOINT"}" || return $?

    # `2>/dev/null >&2` sends stderr to /dev/null, then stdout to the (now
    # null) stderr — i.e. all output is discarded (same net effect as the
    # conventional `>/dev/null 2>&1`). If the `llmf` command is NOT available
    # (`! hascmd`), register `l` as a chat alias for it.
    ! ___x_cmd_llmf___hascmd 2>/dev/null >&2 || ___x_cmd chat --setalias llmf l
}

# Configuration backend: defines the recognized config keys and the
# cfgy-based invoker used to get/set them (model, endpoint, defaults, etc.).

# Whitelist of config keys the llmf module accepts (comma-separated).
___X_CMD_LLMF_CFG_VARLIST="model,endpoint,maxtoken,seed,temperature"
# Thin wrapper around the generic cfgy object: binds llmf's variable
# prefix, default/current config file locations, profile, and key
# whitelist, then forwards all remaining arguments unchanged.
___x_cmd_llmf_cfg___invoke(){
    ___x_cmd_cfgy_obj                                               \
        --prefix            ___X_CMD_LLMF_CFG_DATA                \
        --default-config    "${___X_CMD_ROOT_CFG}/llmf/X.cfg.yml" \
        --current-config    "${___X_CMD_LLMF_LOCAL_CONFIG}"       \
        --current-profile   "${___X_CMD_LLMF_LOCAL_PROFILE}"      \
        --varlist           "$___X_CMD_LLMF_CFG_VARLIST"          \
        "$@"
}

