#pragma once

#include "common.h"
#include "log.h"
#include "llama.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"

// json.hpp should use GGML's assert
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include <random>
#include <sstream>
#include <string>
#include <vector>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::ordered_json;

// logging helpers: "%12.*s" prints the calling function name, truncated to 12 characters

#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)

#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)

#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)

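// error types for HTTP responses; mapped to status codes in format_error_response() below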
enum error_type {
    ERROR_TYPE_INVALID_REQUEST,
    ERROR_TYPE_AUTHENTICATION,
    ERROR_TYPE_SERVER,
    ERROR_TYPE_NOT_FOUND,
    ERROR_TYPE_PERMISSION,
    ERROR_TYPE_UNAVAILABLE,
    ERROR_TYPE_NOT_SUPPORTED,
};

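// safely get a value of type T from a JSON body, falling back to default_value when
// the key is missing, null, or has an incompatible type
// e.g. json_value(body, "n_predict", 128) returns 128 if "n_predict" is absent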
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // fallback to the default value if the key is missing or null
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value\n", key.c_str(), json(default_value).type_name());
            return default_value;
        }
    } else {
        return default_value;
    }
}

//
// tokenizer and input processing utils
//

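// returns true if data is an array whose elements are all integers (token ids)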
static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number_integer()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

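// is the array a mix of strings and integers? (i.e. a prompt given as interleaved text and token ids)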
static bool json_is_array_of_mixed_numbers_strings(const json & data) {
    bool seen_string = false;
    bool seen_number = false;
    if (data.is_array()) {
        for (const auto & e : data) {
            seen_string |= e.is_string();
            seen_number |= e.is_number_integer();
            if (seen_number && seen_string) {
                return true;
            }
        }
    }
    return false;
}

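// tokenize a prompt that is either a plain string or an array mixing strings and token ids,
// e.g. "hello" or [12, 34, "hello", 56]
// special-token handling (add_special) is only applied to the string case and to the first element of an array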
static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) {
    llama_tokens prompt_tokens;

    if (json_prompt.is_array()) {
        bool first = true;
        for (const auto & p : json_prompt) {
            if (p.is_string()) {
                auto s = p.template get<std::string>();

                llama_tokens ids;
                if (first) {
                    ids = common_tokenize(ctx, s, add_special, parse_special);
                    first = false;
                } else {
                    ids = common_tokenize(ctx, s, false, parse_special);
                }

                prompt_tokens.insert(prompt_tokens.end(), ids.begin(), ids.end());
            } else {
                if (first) {
                    first = false;
                }

                prompt_tokens.push_back(p.template get<llama_token>());
            }
        }
    } else {
        auto s = json_prompt.template get<std::string>();
        prompt_tokens = common_tokenize(ctx, s, add_special, parse_special);
    }

    return prompt_tokens;
}

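// break the input "prompt" into one or more prompts and tokenize each of them
// accepted forms:
//   - "string"
//   - [12, 34, 56]                (a single prompt given as token ids)
//   - [12, "string", 34]          (a single prompt mixing text and token ids)
//   - ["string1", [12, 34], ...]  (multiple prompts, each in any of the forms above)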
static std::vector<llama_tokens> tokenize_input_prompts(llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) {
    std::vector<llama_tokens> result;
    if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
        // string or mixed text/tokens
        result.push_back(tokenize_mixed(ctx, json_prompt, add_special, parse_special));
    } else if (json_is_array_of_numbers(json_prompt)) {
        // array of tokens
        result.push_back(json_prompt.get<llama_tokens>());
    } else if (json_prompt.is_array()) {
        // array of prompts
        result.reserve(json_prompt.size());
        for (const auto & p : json_prompt) {
            if (p.is_string() || json_is_array_of_mixed_numbers_strings(p)) {
                result.push_back(tokenize_mixed(ctx, p, add_special, parse_special));
            } else if (json_is_array_of_numbers(p)) {
                // array of tokens
                result.push_back(p.get<llama_tokens>());
            } else {
                throw std::runtime_error("element of \"prompt\" must be a string, a list of tokens, or a list of mixed strings & tokens");
            }
        }
    } else {
        throw std::runtime_error("\"prompt\" must be a string, a list of tokens, a list of mixed strings & tokens, or a list of prompts");
    }
    return result;
}

//
// template utils
//

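// format a rerank task: [BOS]query[EOS][SEP]doc[EOS]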
static llama_tokens format_rerank(const struct llama_model * model, const llama_tokens & query, const llama_tokens & doc) {
    llama_tokens result;
    result.reserve(doc.size() + query.size() + 4);
    result.push_back(llama_token_bos(model));
    result.insert(result.end(), query.begin(), query.end());
    result.push_back(llama_token_eos(model));
    result.push_back(llama_token_sep(model));
    result.insert(result.end(), doc.begin(), doc.end());
    result.push_back(llama_token_eos(model));
    return result;
}

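// format an infill (fill-in-the-middle) task from prefix, suffix and optional extra context chunks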
static llama_tokens format_infill(
        const llama_context * ctx,
        const json & input_prefix,
        const json & input_suffix,
        const json & input_extra,
        const int n_batch,
        const int n_predict,
        const int n_ctx,
        const bool spm_infill,
        const llama_tokens & tokens_prompt
    ) {
    // the extra context chunks use a repo-level FIM layout (when the model defines the corresponding FIM tokens):
    //
    //   [FIM_REP]myproject
    //   [FIM_SEP]filename0
    //   extra chunk 0
    //   [FIM_SEP]filename1
    //   extra chunk 1
    //   ...
    //   [FIM_SEP]filename
    //   [FIM_PRE]prefix + prompt[FIM_SUF]suffix[FIM_MID]
    //
    llama_tokens extra_tokens;
    extra_tokens.reserve(n_ctx);

    auto model = llama_get_model(ctx);
    auto tokens_prefix = tokenize_mixed(ctx, input_prefix, false, false);
    auto tokens_suffix = tokenize_mixed(ctx, input_suffix, false, false);

    if (llama_token_fim_rep(model) != LLAMA_TOKEN_NULL) {
        // the repo/project name is currently hardcoded
        static const auto k_fim_repo = common_tokenize(ctx, "myproject\n", false, false);

        extra_tokens.push_back(llama_token_fim_rep(model));
        extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
    }
    for (const auto & chunk : input_extra) {
        // each chunk is { "text": string, "filename": string }
        const std::string text = json_value(chunk, "text", std::string());
        const std::string filename = json_value(chunk, "filename", std::string("tmp"));

        if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
            const auto k_fim_file = common_tokenize(ctx, filename + "\n", false, false);

            extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
            extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
        } else {
            // the model has no FIM_SEP token - use a plain text separator ("\n\n--- snippet ---\n\n") between chunks
            static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
            static const auto k_chunk_prefix_tokens = common_tokenize(ctx, k_chunk_prefix_str, false, false);

            extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
        }

        const auto chunk_tokens = common_tokenize(ctx, text, false, false);
        extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
    }

    if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
        // close the extra context with one more [FIM_SEP]; the literal string "filename" stands in for the current file name
        static const auto k_fim_file = common_tokenize(ctx, "filename\n", false, false);

        extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
        extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
    }

    // pick the FIM prefix/suffix so they fit in a batch (ratio prefix:suffix is roughly 3:1)
    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));

    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

    // fill the rest of the context with extra chunks, leaving room for the tokens to be generated
    const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());

    tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
    tokens_suffix.resize(n_suffix_take);

    tokens_prefix.insert(tokens_prefix.begin(), llama_token_fim_pre(model));
    tokens_prefix.insert(tokens_prefix.end(), tokens_prompt.begin(), tokens_prompt.end());
    tokens_suffix.insert(tokens_suffix.begin(), llama_token_fim_suf(model));

    auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
    auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;

    if (llama_add_bos_token(model)) {
        embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
    }

    SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());

    // put the extra context before everything else
    embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());

    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    embd_inp.push_back(llama_token_fim_mid(model));

    return embd_inp;
}

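// apply the chat template to a list of {role, content} messages and return the formatted prompt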
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    std::vector<common_chat_msg> chat;

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];

        std::string role = json_value(curr_msg, "role", std::string(""));

        std::string content;
        if (curr_msg.contains("content")) {
            if (curr_msg["content"].is_string()) {
                content = curr_msg["content"].get<std::string>();
            } else if (curr_msg["content"].is_array()) {
                for (const auto & part : curr_msg["content"]) {
                    if (part.contains("text")) {
                        content += "\n" + part["text"].get<std::string>();
                    }
                }
            } else {
                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
            }
        } else {
            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
        }

        chat.push_back({role, content});
    }

    const auto formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
    LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());

    return formatted_chat;
}

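// read the chat template from the model's GGUF metadata ("tokenizer.chat_template"); returns an empty string if not present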
static std::string llama_get_chat_template(const struct llama_model * model) {
    std::string template_key = "tokenizer.chat_template";
    // call with a NULL buffer first to query the length of the value
    int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0);
    if (res < 0) {
        return "";
    } else {
        std::vector<char> model_template(res, 0);
        llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
        return std::string(model_template.data(), model_template.size());
    }
}

//
// base64 utils
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

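// decode a base64 string into raw bytes; decoding stops at the first padding ('=') or non-base64 character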
static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0] ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0] ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}

//
// random string / id utils
//

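// generate a random 32-character alphanumeric string (used for completion ids)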
static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}

//
// other common utils
//

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

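// if text ends with a partial match of stop, return the position in text where that partial match begins,
// otherwise return std::string::npos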
static size_t find_partial_stop_string(const std::string &stop, const std::string &text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}

// convert a range of tokens to a concatenated string of token pieces
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(ctx, *begin);
    }

    return ret;
}

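// format an incomplete UTF-8 multibyte character for output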
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : common_token_to_piece(ctx, token);

    // a single byte with the high bit set is a partial multi-byte character, not valid standalone UTF-8
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

// a generated token together with the text it maps to and its top-probability alternatives
struct completion_token_output {
    llama_token tok;
    std::string text_to_send;

    struct token_prob {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
};

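// convert a vector of completion_token_output to a JSON array of {content, probs}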
static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();

    for (const auto & prob : probs) {
        json probs_for_token = json::array();

        for (const auto & p : prob.probs) {
            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob", p.prob},
            });
        }

        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json {
            {"content", tok_str},
            {"probs", probs_for_token},
        });
    }

    return out;
}

// write a single server-sent event ("<event>: <json>\n\n") to the HTTP response sink
static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) {
    const std::string str =
        std::string(event) + ": " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n";

    LOG_DBG("data stream, to_send: %s", str.c_str());

    return sink.write(str.c_str(), str.size());
}

//
// OAI utils
//

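// convert an OpenAI-style "chat/completions" request body into llama.cpp server completion params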
static json oaicompat_completion_params_parse(
        const struct llama_model * model,
        const json & body,
        const std::string & chat_template) {
    json llama_params;

    llama_params["__oaicompat"] = true;

    // apply the chat template to the list of messages
    llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

    // handle "stop" field - it may be a single string or an array of strings
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // handle "response_format" field - map json_object / json_schema onto the "json_schema" param
    if (body.contains("response_format")) {
        json response_format = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
        } else if (response_type == "json_schema") {
            json json_schema = json_value(response_format, "json_schema", json::object());
            llama_params["json_schema"] = json_value(json_schema, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    // handle "n" field - only a single completion choice is supported
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // handle "logprobs" / "top_logprobs" fields - mapped onto "n_probs"
    if (json_value(body, "logprobs", false)) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // params supported by OAI but not supported here
    static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // copy the remaining properties so that llama.cpp-specific params can also be passed via the OAI endpoint;
    // "n_predict" (if present) always takes the value from the request
    for (const auto & item : body.items()) {
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

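// convert a llama.cpp completion result into a (non-streaming) OAI-compatible chat completion response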
static json format_final_response_oaicompat(const json & request, const json & result, const std::string & completion_id, bool streaming = false, bool verbose = false) {
    bool stopped_word = result.count("stopped_word") != 0;
    bool stopped_eos = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens = json_value(result, "tokens_evaluated", 0);
    std::string content = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                      {"index", 0},
                                      {"message", json{{"content", content},
                                                       {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res = json {
        {"choices", choices},
        {"created", t},
        {"model",
            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
        {"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens", num_prompt_tokens},
            {"total_tokens", num_tokens_predicted + num_prompt_tokens}
        }},
        {"id", completion_id}
    };

    // extra fields for debugging purposes
    if (verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    if (result.contains("timings")) {
        res.push_back({"timings", json_value(result, "timings", json::object())});
    }

    return res;
}

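// convert a streamed llama.cpp result into OAI-compatible chat completion chunks;
// returns a vector because the first update may need to be split into two chunks (role, then content)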
static std::vector<json> format_partial_response_oaicompat(const json & result, const std::string & completion_id) {
    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({result});
    }

    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word = json_value(result, "stopped_word", false);
    bool stopped_eos = json_value(result, "stopped_eos", false);
    bool stopped_limit = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content", std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // we have to send this as two updates to conform to openai behavior
                json initial_ret = json{{"choices", json::array({json{
                                        {"finish_reason", nullptr},
                                        {"index", 0},
                                        {"delta", json{
                                            {"role", "assistant"}
                                        }}}})},
                            {"created", t},
                            {"id", completion_id},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                json second_ret = json{
                            {"choices", json::array({json{{"finish_reason", nullptr},
                                                          {"index", 0},
                                                          {"delta", json{
                                                              {"content", content}}}
                                                          }})},
                            {"created", t},
                            {"id", completion_id},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // updates with empty content can occur here; return an empty object for those
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                    json{
                        {"content", content},
                    }},
            }});
        }
    }

    json ret = json {
        {"choices", choices},
        {"created", t},
        {"id", completion_id},
        {"model", modelname},
        {"object", "chat.completion.chunk"}
    };

    if (result.contains("timings")) {
        ret.push_back({"timings", json_value(result, "timings", json::object())});
    }

    if (!finish_reason.empty()) {
        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
        int num_prompt_tokens = json_value(result, "tokens_evaluated", 0);
        ret.push_back({"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens", num_prompt_tokens},
            {"total_tokens", num_tokens_predicted + num_prompt_tokens}
        }});
    }

    return std::vector<json>({ret});
}

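// wrap embedding results in an OAI-compatible "list" response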
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (const auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index", i++},
            {"object", "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", 0},
            {"total_tokens", 0}
        }},
        {"data", data}
    };

    return res;
}

// wrap reranking scores in a {model, object, usage, results} response
static json format_response_rerank(const json & request, const json & ranks) {
    json data = json::array();
    int i = 0;
    for (const auto & rank : ranks) {
        data.push_back(json{
            {"index", i++},
            {"relevance_score", json_value(rank, "score", 0.0)},
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", 0},
            {"total_tokens", 0}
        }},
        {"results", data}
    };

    return res;
}

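// check that a string is structurally valid UTF-8 (lead bytes followed by the right number of continuation bytes)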
static bool is_valid_utf8(const std::string & str) {
    const unsigned char* bytes = reinterpret_cast<const unsigned char*>(str.data());
    const unsigned char* end = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}

static json format_tokenizer_response(const json & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

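// map an error_type to an OAI-style error object with the corresponding HTTP status code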
static json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
    }
    return json {
        {"code", code},
        {"message", message},
        {"type", type_str},
    };
}