diff --git a/README.md b/README.md
index b64ab5a..4219e97 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,82 @@
 # CodeAIdapter-server
+
+## Project Description
+
+CodeAIdapter is a tool designed to help developers solve programming problems more efficiently. We offer the following services:
+1. Version conversion: upgrading or downgrading code between language versions (e.g., Python 2 and Python 3)
+2. Language conversion: translating code between programming languages (e.g., Python and Java)
+3. Performance optimization: reducing time or space complexity while preserving functionality
+4. Program debugging: fixing compilation errors
+5. Program debugging: fixing runtime errors
+
+Whether you are a beginner or an experienced developer, CodeAIdapter can help you tackle various programming challenges more easily!
+
+## Setup Instructions
+
+### Prerequisites
+
+- Python 3.10 or higher
+- [Poetry](https://python-poetry.org/docs/#installation) for dependency management
+- [Docker](https://docs.docker.com/get-docker/) for containerization
+- [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) for GCP services
+
+### Installation
+
+1. Clone the repository:
+   ```sh
+   git clone https://github.com/CodeAIdapter/CodeAIdapter-server.git
+   cd CodeAIdapter-server
+   ```
+
+2. Install dependencies:
+   ```sh
+   poetry install
+   ```
+
+3. Create a `.env` file based on the `.env-example` file and fill in the required environment variables:
+   ```sh
+   cp .env-example .env
+   ```
+
+4. Run the application:
+   ```sh
+   poetry run python app.py
+   ```
+
+## Usage Examples
+
+### API Endpoint
+
+The main API endpoint is `/api`, which accepts POST requests with the following JSON payload:
+
+```json
+{
+  "prompt": "Your prompt here",
+  "file": "Your code file content here",
+  "filename": "Your code file name here"
+}
+```
+
+### Example Request
+
+```sh
+curl -X POST http://localhost:8080/api -H "Content-Type: application/json" -d '{
+  "prompt": "Convert this Python code to Java",
+  "file": "print(\"Hello, World!\")",
+  "filename": "example.py"
+}'
+```
+
+### Example Response
+
+```json
+{
+  "file": "public class Example { public static void main(String[] args) { System.out.println(\"Hello, World!\"); } }",
+  "filename": "Example.java",
+  "message": "Conversion successful"
+}
+```
+
+## License
+
+This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
diff --git a/app.py b/app.py
index 71eae06..d580b5a 100644
--- a/app.py
+++ b/app.py
@@ -25,10 +25,19 @@
 
 @app.route("/")
 def index():
+    """
+    Index route that returns a simple greeting message.
+    """
     return "Hello, World!"
 
 
 @app.route("/api", methods=["POST"])
 def api_analyze():
+    """
+    API endpoint to analyze the provided code and return the appropriate response.
+
+    Returns:
+        JSON response containing the analyzed result.
+    """
     try:
         data = request.get_json(force=True)
diff --git a/config.py b/config.py
index d38edc7..a3eae84 100644
--- a/config.py
+++ b/config.py
@@ -1,13 +1,17 @@
 import os
 from dotenv import load_dotenv
 
+# Load environment variables from a .env file
 load_dotenv()
 
 class Config:
-    GCP_CREDENTIALS = os.environ.get("GCP_CREDENTIALS")
-    GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
-    OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
-    GCP_CLUSTER_NAME = os.environ.get("GCP_CLUSTER_NAME")
-    GCP_CLUSTER_ZONE = os.environ.get("GCP_CLUSTER_ZONE")
-    GCP_ARTIFACT_REGISTRY = os.environ.get("GCP_ARTIFACT_REGISTRY")
-    GCP_ARTIFACT_REGISTRY_REPO = os.environ.get("GCP_ARTIFACT_REGISTRY_REPO")
\ No newline at end of file
+    """
+    Configuration class to hold environment variables for the application.
+    """
+    GCP_CREDENTIALS = os.environ.get("GCP_CREDENTIALS")  # Path to GCP credentials JSON file
+    GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID")  # GCP project ID
+    OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")  # OpenAI API key
+    GCP_CLUSTER_NAME = os.environ.get("GCP_CLUSTER_NAME")  # GCP Kubernetes cluster name
+    GCP_CLUSTER_ZONE = os.environ.get("GCP_CLUSTER_ZONE")  # GCP Kubernetes cluster zone
+    GCP_ARTIFACT_REGISTRY = os.environ.get("GCP_ARTIFACT_REGISTRY")  # GCP Artifact Registry URL
+    GCP_ARTIFACT_REGISTRY_REPO = os.environ.get("GCP_ARTIFACT_REGISTRY_REPO")  # GCP Artifact Registry repository name
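Note on the `Config` changes above: the class is a thin, environment-backed settings holder, so an unset variable silently becomes `None`. A minimal sketch of how downstream modules consume it (illustrative only, not part of this patch; the fail-fast check is an addition of this note):

```python
# Sketch: consuming the environment-backed Config attributes.
# Assumes a .env file created from .env-example (see the README above).
import openai

from config import Config

# os.environ.get returns None for unset variables, so fail fast here
# rather than deep inside an API call.
if Config.OPENAI_API_KEY is None:
    raise RuntimeError("OPENAI_API_KEY is not set; copy .env-example to .env")

client = openai.OpenAI(api_key=Config.OPENAI_API_KEY)
```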
+ """ + GCP_CREDENTIALS = os.environ.get("GCP_CREDENTIALS") # Path to GCP credentials JSON file + GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") # GCP project ID + OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") # OpenAI API key + GCP_CLUSTER_NAME = os.environ.get("GCP_CLUSTER_NAME") # GCP Kubernetes cluster name + GCP_CLUSTER_ZONE = os.environ.get("GCP_CLUSTER_ZONE") # GCP Kubernetes cluster zone + GCP_ARTIFACT_REGISTRY = os.environ.get("GCP_ARTIFACT_REGISTRY") # GCP Artifact Registry URL + GCP_ARTIFACT_REGISTRY_REPO = os.environ.get("GCP_ARTIFACT_REGISTRY_REPO") # GCP Artifact Registry repository name diff --git a/service/TSID.py b/service/TSID.py index d59101c..b3f9054 100644 --- a/service/TSID.py +++ b/service/TSID.py @@ -12,16 +12,31 @@ import json def detect_language(code): + """ + Detect the programming language of the given code. + + Args: + code (str): The code to analyze. + + Returns: + str: The detected programming language ("java", "python", or "unknown"). + """ if re.search(r"class\s+\w+|public\s+static\s+void\s+main", code): return "java" elif re.search(r"def\s+\w+|import\s+|print\s*\(", code): return "python" return "unknown" - - def run_code(response_json): + """ + Run the provided code in a Docker container based on the language. + + Args: + response_json (str): JSON string containing the code, language, Docker image, and class name. + Returns: + tuple: A tuple containing the output of the code execution and the return code. + """ response_dict = json.loads(response_json) # Parse the JSON string into a dictionary code = response_dict["code"] language = response_dict["language"] @@ -38,7 +53,6 @@ def run_code(response_json): with open(file_path, "w") as f: f.write(code) - # Build the Docker command to run the code if language == "java": # For Java, we assume the docker image contains the necessary tools to compile and run Java code @@ -64,9 +78,20 @@ def run_code(response_json): else: return run_result.stderr, run_result.returncode -def processing_tasks(code, language, task,user_prompt): +def processing_tasks(code, language, task, user_prompt): + """ + Process the given code based on the specified task and user prompt. - fixed_promt = ( + Args: + code (str): The code to process. + language (str): The programming language of the code. + task (str): The task to perform. + user_prompt (str): The user prompt. + + Returns: + str: JSON string containing the processed code, language, Docker image, and class name. + """ + fixed_prompt = ( "Please provide the following in response to the user's request:\n" "1. The executable code remove all comments(Be aware of the infinite loop and memory leak)\n" "2. The programming language of the code in all lowercase letters\n" @@ -77,12 +102,10 @@ def processing_tasks(code, language, task,user_prompt): ) if task[0] == 'B': - # result = run_code(code, language) dev_prompt = ( f"The following {language} code contains an error. Fix it so that it can runs successfully.\n\n" f"Code:\n{code}\n\n" - # f"Error:\n{result}\n\n" - f"{fixed_promt}" + f"{fixed_prompt}" ) else: if task[1] == '1': @@ -93,7 +116,7 @@ def processing_tasks(code, language, task,user_prompt): "Generate an executable Python script and ensure the execution result is the same as the original. 
" "Don't need to determine the version in the code.\n\n" f"Code:\n{code}\n\n" - f"{fixed_promt}" + f"{fixed_prompt}" ) elif language == "java": dev_prompt = ( @@ -102,7 +125,7 @@ def processing_tasks(code, language, task,user_prompt): "Generate an executable Java script and ensure the execution result is the same as the original. " "Don't need to determine the version in the code.\n\n" f"Code:\n{code}\n\n" - f"{fixed_promt}" + f"{fixed_prompt}" ) elif task[1] == '2': if language == "python": @@ -110,14 +133,14 @@ def processing_tasks(code, language, task,user_prompt): "Convert the following Python code to Java while preserving its logic and functionality. " "Ensure that all Python-specific constructs are properly adapted to Java idioms.\n\n" f"Code:\n{code}\n\n" - f"{fixed_promt}" + f"{fixed_prompt}" ) elif language == "java": dev_prompt = ( "Convert the following Java code to Python while preserving its logic and functionality. " "Ensure that all Java-specific constructs are properly adapted to Python idioms. " f"Code:\n{code}\n\n" - f"{fixed_promt}" + f"{fixed_prompt}" ) elif task[1] == '3': dev_prompt = ( @@ -125,21 +148,30 @@ def processing_tasks(code, language, task,user_prompt): "(such as reducing time or space complexity) while maintaining its functionality. " "Refer to the comments in the code if you need them.\n\n" f"Code:\n{code}\n\n" - f"{fixed_promt}" + f"{fixed_prompt}" ) - response_text = OpenAIChat.chat(dev_prompt,user_prompt) + response_text = OpenAIChat.chat(dev_prompt, user_prompt) response_json = re.sub(r"```(?:\w+)?\n?", "", response_text).strip("`") return response_json -def fix_code_with_llm(response_json, error_message,usr_prompt): +def fix_code_with_llm(response_json, error_message, user_prompt): + """ + Fix the provided code using a language model based on the error message. + Args: + response_json (str): JSON string containing the code, language, Docker image, and class name. + error_message (str): The error message encountered during code execution. + user_prompt (str): The user prompt. + Returns: + str: JSON string containing the fixed code, language, Docker image, and class name. + """ response_dict = json.loads(response_json) code = response_dict["code"] language = response_dict["language"] - fixed_promt = ( + fixed_prompt = ( "Please provide the following in response to the user's request:\n" "1. The executable code remove all comments(Be aware of the infinite loop and memory leak)\n" "2. The programming language of the code in all lowercase letters\n" @@ -153,15 +185,26 @@ def fix_code_with_llm(response_json, error_message,usr_prompt): f"This {language} code contains an error. Please help me fix it.\n\n" f"Code:\n{code}\n\n" f"Error:\n{error_message}\n\n" - f"{fixed_promt}" - + f"{fixed_prompt}" ) - response_text = OpenAIChat.chat(prompt,usr_prompt) + response_text = OpenAIChat.chat(prompt, user_prompt) response_json = re.sub(r"```(?:\w+)?\n?", "", response_text).strip("`") return response_json -def return_code_response(code_response,response_json,result,status): +def return_code_response(code_response, response_json, result, status): + """ + Return the code response based on the execution result and status. + + Args: + code_response (CodeResponse): The code response object to update. + response_json (str): JSON string containing the code, language, Docker image, and class name. + result (str): The result of the code execution. + status (int): The status code of the code execution. + + Returns: + CodeResponse: The updated code response object. 
+ """ response_dict = json.loads(response_json) code = response_dict["code"] language = response_dict["language"] @@ -179,15 +222,22 @@ def return_code_response(code_response,response_json,result,status): else: code_response.error_msg = result code_response.status = False - # subprocess.run(f"docker rmi -f $(docker images --filter=reference='{docker_image}' -q)", shell=True) subprocess.run(f"docker rmi -f $(docker images -q)", shell=True) return code_response +def StartProcess(code, task, user_prompt): + """ + Start the process of handling the code based on the task and user prompt. -def StartProcess(code, task, usr_prompt): - + Args: + code (str): The code to process. + task (str): The task to perform. + user_prompt (str): The user prompt. + + Returns: + CodeResponse: The final code response object. + """ count = 0 - code_response = CodeResponse(file="", filename="", success_msg="", error_msg="", status=False) language = detect_language(code) @@ -196,28 +246,23 @@ def StartProcess(code, task, usr_prompt): code_response.status = False return code_response else: - - response_json = processing_tasks(code, language, task,usr_prompt) - - result,status = run_code(response_json) + response_json = processing_tasks(code, language, task, user_prompt) + result, status = run_code(response_json) - - print("result:",result,"status:",status) + print("result:", result, "status:", status) - while status == 1 and count < 3: + while status == 1 and count < 3: count += 1 print("Fixing error...") - response_json = fix_code_with_llm(response_json,result,usr_prompt) - result,status = run_code(response_json) + response_json = fix_code_with_llm(response_json, result, user_prompt) + result, status = run_code(response_json) - return return_code_response(code_response,response_json,result,status) - - + return return_code_response(code_response, response_json, result, status) if __name__ == "__main__": count = 0 input_file_path = "./TestCase/A1-2.py" # Path to the code file - usr_prompt = "Help me change the code to Python 2.7 compatible." + user_prompt = "Help me change the code to Python 2.7 compatible." with open(input_file_path, "r") as file: code = file.read() @@ -227,20 +272,15 @@ def StartProcess(code, task, usr_prompt): else: task = input("Please enter the task:") - response_json = processing_tasks(code, language, task,usr_prompt) - - - result,status = run_code(response_json) + response_json = processing_tasks(code, language, task, user_prompt) + result, status = run_code(response_json) - - print("result:",result,"status:",status) + print("result:", result, "status:", status) - while status == 1 and count < 3: + while status == 1 and count < 3: count += 1 print("Fixing error...") - response_json = fix_code_with_llm(response_json,result,usr_prompt) - result,status = run_code(response_json) + response_json = fix_code_with_llm(response_json, result, user_prompt) + result, status = run_code(response_json) - print("Execution Result:\n", result) - diff --git a/service/deploy/k8s.py b/service/deploy/k8s.py index 5fcceea..c2ba3b8 100644 --- a/service/deploy/k8s.py +++ b/service/deploy/k8s.py @@ -24,6 +24,9 @@ def change_dir(path: str): """ Context manager to change the current working directory and then restore it. + + Args: + path (str): The path to change to. """ original_dir = os.getcwd() os.chdir(path) @@ -142,6 +145,7 @@ def _execute_command(self, command: str, max_size: Optional[int] = None) -> bool Args: command (str): The command to execute. + max_size (Optional[int]): The maximum size of the output to log. 
diff --git a/service/deploy/k8s.py b/service/deploy/k8s.py
index 5fcceea..c2ba3b8 100644
--- a/service/deploy/k8s.py
+++ b/service/deploy/k8s.py
@@ -24,6 +24,9 @@ def change_dir(path: str):
     """
     Context manager to change the current working directory and then restore it.
+
+    Args:
+        path (str): The path to change to.
     """
     original_dir = os.getcwd()
     os.chdir(path)
@@ -142,6 +145,7 @@ def _execute_command(self, command: str, max_size: Optional[int] = None) -> bool
 
         Args:
             command (str): The command to execute.
+            max_size (Optional[int]): The maximum size of the output to log.
 
         Returns:
             bool: True if the command executes successfully; otherwise, False.
@@ -267,4 +271,4 @@ def run(self) -> bool:
             return False
 
         self.logs.append("Deployment successful.")
-        return True
\ No newline at end of file
+        return True
diff --git a/utils/llm/openai.py b/utils/llm/openai.py
index 99541c2..ff15889 100644
--- a/utils/llm/openai.py
+++ b/utils/llm/openai.py
@@ -3,28 +3,48 @@
 from .base import LLMBase
 
 class OpenAIChat(LLMBase):
+    """
+    A class to interact with OpenAI's chat models.
+
+    Attributes:
+        _initialized (bool): Indicates if the OpenAI client has been initialized.
+        _client (openai.OpenAI): The OpenAI client instance.
+        DEFAULT_MODEL (str): The default model to use for chat completions.
+    """
     _initialized = False
     _client = None
     DEFAULT_MODEL = "gpt-4o-mini-2024-07-18"
 
     @classmethod
     def _initialize(cls):
+        """
+        Initialize the OpenAI client with the provided API key.
+        """
         if not cls._initialized:
             cls._client = openai.OpenAI(api_key=Config.OPENAI_API_KEY)
             cls._initialized = True
 
     @classmethod
-    def chat(cls, dev_prompt:str, usr_prompt: str, model: str = None) -> str:
+    def chat(cls, dev_prompt: str, usr_prompt: str, model: str = None) -> str:
+        """
+        Generate a chat completion response from the OpenAI model.
+
+        Args:
+            dev_prompt (str): The developer's prompt message.
+            usr_prompt (str): The user's prompt message.
+            model (str, optional): The model to use for chat completions. Defaults to None.
+
+        Returns:
+            str: The content of the chat completion response.
+        """
         cls._initialize()
         model_name = model or cls.DEFAULT_MODEL
         response = cls._client.chat.completions.create(
-            model = model_name,
-            store = True,
-            messages = [
+            model=model_name,
+            store=True,
+            messages=[
                 {"role": "developer", "content": dev_prompt},
                 {"role": "user", "content": usr_prompt}
             ]
         )
         return response.choices[0].message.content
-
-        
\ No newline at end of file
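Finally, a usage sketch for the `OpenAIChat` wrapper touched above (illustrative only, not part of this patch): `chat()` lazily initializes one shared client, then sends a developer message and a user message in a single completion request, which is how `service/TSID.py` drives it. It assumes `OPENAI_API_KEY` is set via `.env`; the prompts below are made-up examples.

```python
# Sketch: driving the OpenAIChat wrapper from utils/llm/openai.py.
# Assumes OPENAI_API_KEY is present in the environment (see config.py).
from utils.llm.openai import OpenAIChat

dev_prompt = (
    "Convert the following Python code to Java while preserving its logic.\n"
    'Code:\nprint("Hello, World!")'
)
usr_prompt = "Convert this Python code to Java"

# The first call builds the shared openai.OpenAI client; later calls reuse it.
reply = OpenAIChat.chat(dev_prompt, usr_prompt)
print(reply)
```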