// good resources // https://opensearch.org/blog/improving-document-retrieval-with-sparse-semantic-encoders/ // https://huggingface.co/opensearch-project/opensearch-neural-sparse-encoding-v1 // // run with // text-embeddings-router --model-id opensearch-project/opensearch-neural-sparse-encoding-v1 --pooling splade #include #include #include #include #include #include #include #include using json = nlohmann::json; std::vector embed(const std::vector& inputs) { std::string url{"http://localhost:3000/embed_sparse"}; json data{ {"inputs", inputs} }; cpr::Response r = cpr::Post( cpr::Url{url}, cpr::Body{data.dump()}, cpr::Header{{"Content-Type", "application/json"}} ); if (r.status_code != 200) { throw std::runtime_error{"Bad status: " + std::to_string(r.status_code)}; } json response = json::parse(r.text); std::vector embeddings; for (const auto& item : response) { std::unordered_map map; for (const auto& e : item) { map.insert({e["index"], e["value"]}); } embeddings.emplace_back(pgvector::SparseVector{map, 30522}); } return embeddings; } int main() { pqxx::connection conn{"dbname=pgvector_example"}; pqxx::nontransaction tx{conn}; tx.exec("CREATE EXTENSION IF NOT EXISTS vector"); tx.exec("DROP TABLE IF EXISTS documents"); tx.exec("CREATE TABLE documents (id bigserial PRIMARY KEY, content text, embedding sparsevec(30522))"); std::vector input{ "The dog is barking", "The cat is purring", "The bear is growling" }; std::vector embeddings = embed(input); for (size_t i = 0; i < input.size(); i++) { tx.exec("INSERT INTO documents (content, embedding) VALUES ($1, $2)", pqxx::params{input[i], embeddings[i]}); } std::string query{"forest"}; pgvector::SparseVector query_embedding = embed({query})[0]; pqxx::result result = tx.exec("SELECT content FROM documents ORDER BY embedding <#> $1 LIMIT 5", pqxx::params{query_embedding}); for (const auto& row : result) { std::cout << row[0].as() << std::endl; } return 0; }