# shellcheck shell=dash
# https://docs.github.com/en/github-models/prototyping-with-ai-models#experimenting-with-ai-models-using-the-api

# Dispatcher for `gh model chat` subcommands.
# Known subcommands: request, exec, and the internal --def-model_ probe.
# Any other value is rejected with exit code 64 (usage error).
___x_cmd_gh_model_chat(){
    local X_help_cmd='___x_cmd help -m gh model chat'; help:arg-null:parse
    local subcmd="$1"
    case "$subcmd" in
        request|exec)
            # Forward remaining args to the matching handler function.
            shift
            ___x_cmd_gh_model_chat_"$subcmd" "$@"
            ;;
        --def-model_)
            # Internal hook: report the default model via the x_ out-parameter.
            ___x_cmd_gh_model_chat_def_model_ "$@"
            ;;
        *)
            N=gh M="Not support such [subcmd=$subcmd]" log:ret:64
            ;;
    esac
}

# Return the configured default model name via the x_ out-parameter
# (x-cmd convention: x_ is deliberately NOT local so the caller can read it).
# Reads: ___X_CMD_GH_MODEL_DEFAULT_NAME
___x_cmd_gh_model_chat_def_model_(){
    x_="${___X_CMD_GH_MODEL_DEFAULT_NAME}"
}

# Run a chat request against GitHub Models by delegating to the generic
# `chat` module with `--provider gh`; all user arguments pass through.
# NOTE(review): the --exec / --provider flag order is kept as-is — the
# downstream parser's sensitivity to option order is not visible here.
___x_cmd_gh_model_chat_request(){
    local X_help_cmd='___x_cmd help -m gh model chat request'; help:arg:parse
    ___x_cmd chat --exec --provider gh "$@"
}

# Execute a chat turn through the shared openai module, pointed at the
# GitHub Models inference endpoint. The ___X_CMD_OPENAI_CHAT_ACTUAL_* env
# vars are prefix-assignments: they exist only for this one invocation and
# do not leak into the caller's environment.
___x_cmd_gh_model_chat_exec(){
    ___X_CMD_OPENAI_CHAT_ACTUAL_PROVIDER='gh' \
    ___X_CMD_OPENAI_CHAT_ACTUAL_PROVIDER_NAME='GitHub Models' \
    ___X_CMD_OPENAI_CHAT_ACTUAL_ENDPOINT="https://models.github.ai/inference" \
    ___X_CMD_OPENAI_CHAT_ACTUAL_URL_NOT_V1=1 \
        ___x_cmd openai chat exec "$@"
}
