# shellcheck shell=dash disable=SC2034

# Send the prepared chat-completions request body to the active provider's
# OpenAI-compatible endpoint and stream curl's response to stdout.
#
# NOTE(review): "generaxwtecontent" in the name looks like a typo of
# "generatecontent"; renaming would break external callers — confirm before fixing.
#
# Arguments:
#   $1 - model name (optional; empty means "use the provider's loaded model")
# Reads (caller scope):
#   request_body_file - path to the JSON request body (see L7 localization below)
#   ___X_CMD_OPENAI_CHAT_ACTUAL_* - provider/endpoint state set elsewhere
# Outputs:
#   provider response on stdout; log lines via ___x_cmd log
# Returns:
#   non-zero if provider validation fails, otherwise curl's exit status
___x_cmd_openai_request_generaxwtecontent(){
    local act_provider="$___X_CMD_OPENAI_CHAT_ACTUAL_PROVIDER"
    ___x_cmd_openai_chat_provider___validate "$act_provider" || return $?

    # Re-declare as local with the caller's value: dash uses dynamic scoping,
    # so this snapshots the caller-scope request_body_file into this function.
    local request_body_file="${request_body_file}"
    local model="$1"

    # Credentials/endpoint slots. Presumably the `--cur` subcommands below fill
    # these via dynamic scoping (apikey:=token maps config key "token" to the
    # local "apikey") — TODO confirm against x-cmd core's --cur implementation.
    local apikey=;   local proxy=;  local endpoint=;
    case "$act_provider" in
        lms)    ___x_cmd lms --cur endpoint:= 2>/dev/null ;;            # local server: endpoint only, no key
        gh)     ___x_cmd gh --cur apikey:=token proxy:=ai_proxy 2>/dev/null ;;
        ollama|llmf) ;;                                                  # no config needed; rely on defaults
        *)      ___x_cmd "$act_provider" --cur apikey:= proxy:= endpoint:= 2>/dev/null ;;
    esac

    if [ -z "$model" ]; then
        ___x_cmd log ":${act_provider}" info "Requesting ${___X_CMD_OPENAI_CHAT_ACTUAL_PROVIDER_NAME} with the loaded model"
    else
        ___x_cmd log ":${act_provider}" info "Requesting ${___X_CMD_OPENAI_CHAT_ACTUAL_PROVIDER_NAME} [model=$model]"
    fi


    # Resolve the request URL: explicit override wins; otherwise build from the
    # first non-empty endpoint (provider config > provider default > global default).
    local url="${___X_CMD_OPENAI_CHAT_ACTUAL_REQUEST_URL}"
    [ -n "$url" ] || {
        endpoint="${endpoint:-"$___X_CMD_OPENAI_CHAT_ACTUAL_ENDPOINT"}"
        endpoint="${endpoint:-"$___X_CMD_OPENAI_DEFAULT_ENDPOINT"}"
        url="$endpoint/v1/chat/completions"
        # Some providers omit the /v1 path segment; flag set elsewhere per provider.
        [ -z "$___X_CMD_OPENAI_CHAT_ACTUAL_URL_NOT_V1" ] || url="$endpoint/chat/completions"
    }
    ___x_cmd log ":${act_provider}" debug --url "$url" --model "$model"
    # TODO: parse content , get base64 data

    # TODO: Need to json quote the content
    # LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
    # Pipe the body file to curl via stdin (-d @-); the auth header is emitted
    # only when apikey is non-empty (${var:+...} expands to nothing otherwise).
    # `proxy runifset` presumably runs the command through $proxy only when set
    # — confirm against x-cmd's proxy module.
    < "$request_body_file" ___x_cmd proxy runifset "$proxy" \
    ___x_cmd curl    \
        ${apikey:+-H} ${apikey:+"Authorization: Bearer $apikey"}  \
        -sS "$url" \
        -H "Content-Type: application/json" \
        -d @-
}


