39 changes: 39 additions & 0 deletions .github/workflows/cppcheck.yml
@@ -0,0 +1,39 @@
name: Static Analysis

on:
  push:
    branches: [main]
  pull_request:

jobs:
  cppcheck:
    name: Run Cppcheck
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          submodules: true
          lfs: true

      - name: Install cppcheck
        run: |
          sudo apt-get update
          sudo apt-get install -y cppcheck

      - name: Run cppcheck
        run: |
          mkdir -p cppcheck-report
          cppcheck --enable=all --inconclusive --quiet \
            --output-file=cppcheck-report/cppcheck.txt \
            $GITHUB_WORKSPACE/framework/src/ \
            -I $GITHUB_WORKSPACE/include/ \
            -I $GITHUB_WORKSPACE/framework/include/
          cat cppcheck-report/cppcheck.txt

      - name: Upload cppcheck report artifact
        uses: actions/upload-artifact@v4
        with:
          name: cppcheck-report
          path: cppcheck-report/cppcheck.txt
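
With --enable=all, cppcheck reports style, performance, and portability findings in addition to plain errors. As a rough illustration only (a toy file, not part of this PR), code like the following is the kind of thing that configuration flags:

    // toy.cpp — hypothetical example, not from this repository
    class Counter {
    public:
        Counter() {}                // flagged: member 'Counter::count' is not initialized (uninitMemberVar)
        int value() const { return count; }
    private:
        int count;
    };

    int main() {
        int unused;                 // flagged: unused variable 'unused' (unusedVariable)
        Counter c;
        return c.value();
    }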
11 changes: 11 additions & 0 deletions MODULE.bazel
@@ -14,6 +14,17 @@ bazel_dep(name = "rules_python", version = "0.37.2")
bazel_dep(name = "platforms", version = "0.0.10")
bazel_dep(name = "googletest", version = "1.15.2")
bazel_dep(name = "apple_support", version = "1.17.1", repo_name = "build_bazel_apple_support")
bazel_dep(name = "curl", version = "8.8.0")
bazel_dep(name = "nlohmann_json", version = "3.11.3")
bazel_dep(name = "hedron_compile_commands", dev_dependency = True)
bazel_dep(name = "flatbuffers", version = "24.3.25")

# Hedron's Compile Commands Extractor for Bazel
git_override(
    module_name = "hedron_compile_commands",
    remote = "https://github.com/hedronvision/bazel-compile-commands-extractor.git",
    commit = "4f28899228fb3ad0126897876f147ca15026151e",
)

# Use archive_override to patch rules_foreign_cc to default to specific cmake version
archive_override(
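
For reference, hedron_compile_commands is declared with dev_dependency = True, so it only affects local development, not downstream consumers; per the extractor's README it is typically driven with bazel run @hedron_compile_commands//:refresh_all to generate compile_commands.json for clangd-style tooling.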
11,750 changes: 11,675 additions & 75 deletions MODULE.bazel.lock

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions framework/src/vx_context.cpp
@@ -34,6 +34,8 @@ vx_char targetModules[][VX_MAX_TARGET_NAME] = {
#endif
    "openvx-c_model",
    "openvx-onnxRT",
    "openvx-ai-server",
    "openvx-liteRT",
};

const vx_char extensions[] =
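
The two new entries extend the list of target kernel modules the context knows about. As a heavily hedged illustration of the convention only (OpenVX sample implementations usually derive a shared-library name from each entry; this framework's actual loader is not shown in this diff):

    #include <dlfcn.h>
    #include <string>

    // Illustration only: resolve a targetModules entry such as "openvx-liteRT"
    // to a conventional shared-object name and load it.
    static void *loadTargetModule(const std::string &name)
    {
        std::string path = "lib" + name + ".so";  // e.g. libopenvx-liteRT.so
        return dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL);
    }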
28 changes: 27 additions & 1 deletion include/VX/vx_corevx_ext.h
@@ -1,6 +1,6 @@
/**
 * @file vx_corevx_ext.h
 * @brief Extensions enabled for corevs
 * @brief Extensions enabled for corevx
 * @version 0.1
 * @date 2024-12-15
 *
@@ -13,6 +13,24 @@
#include <VX/vx_kernels.h>
#include <VX/vx_types.h>

#ifdef __cplusplus
#include <string>

/*! \brief A character array (string) type.
 * \note This is a C++ string type. It is not a C string.
 * \ingroup group_basic_features
 */
using vx_string = std::string;
#endif /* __cplusplus */

/*! \brief The type enumeration lists additional types to extend the known types in OpenVX.
 * \ingroup group_basic_features
 */
enum vx_type_ext_e
{
    VX_TYPE_STRING = 0x818, /*!< \brief A <tt>\ref vx_string</tt>. */
};

/*! \brief Define Edge AI Vendor ID
 * \ingroup group_basic_features
 */
@@ -30,6 +48,14 @@ enum vx_kernel_ext_e
     * \brief The ONNX Runtime CPU Inference kernel.
     */
    VX_KERNEL_ORT_CPU_INF = VX_KERNEL_BASE(VX_ID_EDGE_AI, VX_LIBRARY_KHR_BASE) + 0x1,
    /*!
     * \brief The AI Model Server Chatbot kernel.
     */
    VX_KERNEL_AIS_CHATBOT = VX_KERNEL_BASE(VX_ID_EDGE_AI, VX_LIBRARY_KHR_BASE) + 0x2,
    /*!
     * \brief The LiteRT CPU Inference kernel.
     */
    VX_KERNEL_LITERT_CPU_INF = VX_KERNEL_BASE(VX_ID_EDGE_AI, VX_LIBRARY_KHR_BASE) + 0x3,
};

/*! \brief additional tensor attributes.
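
A brief usage sketch for the new kernel enums (assuming an initialized context; vxGetKernelByEnum, vxGetStatus, and vxReleaseKernel are standard OpenVX API, while the two enums come from this header):

    #include <VX/vx.h>
    #include <VX/vx_corevx_ext.h>

    // Hypothetical helper: confirm the new kernels are registered with the context.
    static vx_status lookupNewKernels(vx_context context)
    {
        vx_kernel chatbot = vxGetKernelByEnum(context, VX_KERNEL_AIS_CHATBOT);
        vx_kernel litert = vxGetKernelByEnum(context, VX_KERNEL_LITERT_CPU_INF);
        if (vxGetStatus((vx_reference)chatbot) != VX_SUCCESS ||
            vxGetStatus((vx_reference)litert) != VX_SUCCESS)
            return VX_FAILURE;
        vxReleaseKernel(&chatbot);
        vxReleaseKernel(&litert);
        return VX_SUCCESS;
    }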
20 changes: 20 additions & 0 deletions kernels/ai_server/BUILD
@@ -0,0 +1,20 @@
cc_library(
    name = "llm_kernels",
    srcs = glob([
        "*.cpp",
    ]),
    hdrs = glob([
        "*.h",
        "*.hpp",
    ]),
    includes = [
        ".",
        "//framework/include"
    ],
    deps = [
        "//:corevx",
        "@curl//:curl",
        "@nlohmann_json//:json"
    ],
    visibility = ["//visibility:public"]
)
110 changes: 110 additions & 0 deletions kernels/ai_server/chatbot.hpp
@@ -0,0 +1,110 @@
/**
 * @file chatbot.hpp
 * @brief Kernel for AI Model Server Chatbot
 * @version 0.1
 * @date 2025-04-04
 *
 * @copyright Copyright (c) 2025
 *
 */
#include <curl/curl.h>
#include <nlohmann/json.hpp>
#include <string>
#include <vector>
#include <VX/vx.h>

#define DEFAULT_MODEL "gpt-4o-mini"
#define SERVER_URL "http://localhost:8000"
#define API_KEY "hardcoded-api-key"

class RemoteModelClient
{
private:
    // Helper function for non-streaming response
    static size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp)
    {
        size_t totalSize = size * nmemb;
        ((std::string *)userp)->append((char *)contents, totalSize);
        return totalSize;
    }

public:
    // kernel function (non-streaming)
    vx_status AiServerQuery(const std::string &input_text, std::string &output_text, const std::string &api_path)
    {
        CURL *curl = curl_easy_init();
        if (!curl)
            return VX_FAILURE;

        nlohmann::json request_json = {
            {"model", DEFAULT_MODEL},
            {"messages", {{{"role", "user"}, {"content", input_text}}}},
            {"max_tokens", 100},
            {"stream", false}};

        std::string request_payload = request_json.dump();
        std::string response_string;
        std::string api_url = std::string(SERVER_URL) + api_path;

        struct curl_slist *headers = nullptr;
        headers = curl_slist_append(headers, "Content-Type: application/json");
        headers = curl_slist_append(headers, ("Authorization: Bearer " + std::string(API_KEY)).c_str());

        curl_easy_setopt(curl, CURLOPT_URL, api_url.c_str());
        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_payload.c_str());
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response_string);

        CURLcode res = curl_easy_perform(curl);
        curl_slist_free_all(headers);
        curl_easy_cleanup(curl);

        if (res != CURLE_OK)
            return VX_FAILURE;

        // Parse defensively: a malformed or error reply should fail the kernel
        // rather than throw an exception out of it.
        auto json_response = nlohmann::json::parse(response_string, nullptr, false);
        if (json_response.is_discarded() || !json_response.contains("choices"))
            return VX_FAILURE;
        output_text = json_response["choices"][0]["message"]["content"];

        return VX_SUCCESS;
    }

    // kernel function (streaming)
    vx_status AiServerQueryStream(const std::string &input_text, std::string &output_text, const std::string &api_path)
    {
        CURL *curl = curl_easy_init();
        if (!curl)
            return VX_FAILURE;

        nlohmann::json request_json = {
            {"model", DEFAULT_MODEL},
            {"messages", {{{"role", "user"}, {"content", input_text}}}},
            {"max_tokens", 100},
            {"stream", true}};

        std::string request_payload = request_json.dump();
        std::string response_chunk;
        std::string api_url = std::string(SERVER_URL) + api_path;

        struct curl_slist *headers = nullptr;
        headers = curl_slist_append(headers, "Content-Type: application/json");
        headers = curl_slist_append(headers, ("Authorization: Bearer " + std::string(API_KEY)).c_str());

        curl_easy_setopt(curl, CURLOPT_URL, api_url.c_str());
        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_payload.c_str());
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response_chunk);

        CURLcode res = curl_easy_perform(curl);
        curl_slist_free_all(headers);
        curl_easy_cleanup(curl);

        if (res != CURLE_OK)
            return VX_FAILURE;

        // Just return raw streamed response (newline-delimited JSON chunks)
        output_text = response_chunk;
        return VX_SUCCESS;
    }
};
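
A minimal driver for the non-streaming path (a sketch only: the route "/v1/chat/completions" is an assumption based on the OpenAI-style request body, and a model server must already be listening at SERVER_URL):

    #include <cstdio>
    #include "chatbot.hpp"

    int main()
    {
        RemoteModelClient client;
        std::string reply;
        // The API path below is a guess; this PR does not pin the server's routes.
        if (client.AiServerQuery("Hello from OpenVX", reply, "/v1/chat/completions") == VX_SUCCESS)
            std::printf("%s\n", reply.c_str());
        return 0;
    }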
21 changes: 21 additions & 0 deletions kernels/liteRT/BUILD
@@ -0,0 +1,21 @@

cc_library(
    name = "liteRT_kernels",
    srcs = glob([
        "*.cpp",
    ]),
    hdrs = glob([
        "*.h",
        "*.hpp",
    ]),
    includes = [
        ".",
        "//framework/include",
    ],
    deps = [
        "//:corevx",
        "//third_party:tflite",
        "//third_party:tflite-hdrs",
    ],
    visibility = ["//visibility:public"]
)
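
The kernel sources themselves are not rendered in this diff, but a hedged sketch of the conventional LiteRT (TensorFlow Lite) CPU-inference flow that //third_party:tflite provides looks like this (not this PR's actual kernel code):

    #include <memory>
    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"

    // Load a .tflite model, allocate tensors, and run one inference on the CPU.
    bool runOnce(const char *model_path)
    {
        auto model = tflite::FlatBufferModel::BuildFromFile(model_path);
        if (!model)
            return false;
        tflite::ops::builtin::BuiltinOpResolver resolver;
        std::unique_ptr<tflite::Interpreter> interpreter;
        if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk)
            return false;
        if (interpreter->AllocateTensors() != kTfLiteOk)
            return false;
        // ... copy input data into interpreter->typed_input_tensor<float>(0) here ...
        return interpreter->Invoke() == kTfLiteOk;  // 'model' must outlive 'interpreter'
    }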