diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6769e21
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
index 654fb36..1de6adc 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2024 N3N
+Copyright (c) 2024 Vidura Labs Inc.

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index a775921..6da8c05 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,101 @@
 # promptml-cli
-A CLI application to parse Prompt Markup Language
+A CLI application to run PromptML scripts against LLMs.
+
+## Installation
+```bash
+pip install --upgrade promptml-cli
+```
+
+## Usage
+```bash
+promptml-cli --help
+
+usage: promptml-cli [-h] -f FILE [-m MODEL] [-s {xml,json,yaml}] [-p {openai,google}]
+
+A Command Line Interface tool to run PromptML files with popular Generative AI models
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -f FILE, --file FILE  Path to the PromptML file
+  -m MODEL, --model MODEL
+                        Model to use for the completion
+  -s {xml,json,yaml}, --serializer {xml,json,yaml}
+                        Serializer to use for the completion. Default is `xml`
+  -p {openai,google}, --provider {openai,google}
+                        GenAI provider to use for the completion. Default is `openai`
+```
+
+1. Create a PromptML file `character.pml` with the following content:
+
+```promptml
+@prompt
+    @context
+        You are a military general in the Roman army.
+    @end
+
+    @objective
+        Describe a regular day in your life.
+    @end
+
+    @instructions
+        @step
+            Be cheeky and sarcastic.
+        @end
+    @end
+
+    @category
+        Simulation
+    @end
+@end
+```
+See the PromptML [documentation](https://www.promptml.org/) for more details.
+
+2. Set your OpenAI API key as an environment variable:
+
+```bash
+export OPENAI_API_KEY=your-openai-api-key
+```
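+
+To use Google's Gemini models instead, set your Google API key and pass `-p google`. If you keep the default model, it is switched to `gemini-1.5-flash-latest` automatically:
+
+```bash
+export GOOGLE_API_KEY=your-google-api-key
+```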
+
+3. Run the PromptML file with the following command in your terminal:
+
+```bash
+promptml-cli -f character.pml -m gpt-4o
+```
+
+You will see output like the following in your terminal:
+
+```info
+Ah, so you want a glimpse into the life of a Roman general, do you? Well, let me spin you a tale, dripping with sarcasm and cheeky remarks, because obviously, my life is a walk in the park. Shall we?
+
+The day usually starts with the soothing sounds of soldiers clanging their swords and shields together at some ungodly hour. Rather than waking up to the gentle cooing of doves, I get to hear the charming war cries of recruits who still can't tell their gladius from their left foot. Delightful, isn't it?
+
+After I drag myself out of what I'm convinced is a sack filled with rocks they call a bed, it's straight to the strategy tent. Here, I enjoy the riveting discussions about which barbarian horde is threatening our borders this week. It's like choosing the lesser of two evils: invasions from the north or mutiny from the ranks. Decisions, decisions!
+
+Next on the agenda is overseeing training. Oh yes, I just love watching greenhorns stumble through basic drills. The way they handle their weapons – you'd think a lopsided stick had suddenly become the deadliest thing in their hands. But hey, a general's got to humor them, right?
+
+Then there's the daily feast of dried meat and stale bread, washed down with wine that's likely been used as paint thinner. Ah, the joys of Roman culinary delights. I'm sure Bacchus himself is weeping with laughter somewhere.
+
+Afternoons are reserved for dealing with the Senate's missives, those beautifully crafted scrolls filled with ‘helpful’ suggestions and veiled threats. It's like mail time with a hint of doomsday. And who can forget the thrill of addressing the legion, trying to maintain morale while standing in armor that weighs more than some of the new recruits?
+
+As evening falls, I get to review the day's progress with my centurions, who conveniently bring me the freshest of problems right before bedtime. If I’m lucky, I'll dodge an assassination attempt or two – keeps life exciting, don’t you think?
+
+Finally, I retire for the night, eager to wake up and do it all over again. Really, what's not to love? So there you have it! Just another average day in the life of a Roman general – a blend of strategy, sarcasm, and just a dash of masochism.
+
+Time taken: 6.006012916564941 seconds
+```
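+
+You can also change how the parsed prompt is serialized before it is sent to the model with the `-s` flag (`xml` by default, or `json`/`yaml`):
+
+```bash
+promptml-cli -f character.pml -m gpt-4o -s json
+```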
+
+## TODO
+- Add support for Claude, Cohere & AI21 Labs GenAI models
+- Add tests
diff --git a/character.pml b/character.pml
new file mode 100644
index 0000000..e8bbf05
--- /dev/null
+++ b/character.pml
@@ -0,0 +1,18 @@
+@prompt
+    @context
+        You are a military general in the Roman army.
+    @end
+
+    @objective
+        Describe a regular day in your life.
+    @end
+
+    @instructions
+        @step
+            Be cheeky and sarcastic.
+        @end
+    @end
+    @category
+        Simulation
+    @end
+@end
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..90a9575
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,86 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "promptml-cli"
+dynamic = ["version"]
+description = 'A CLI tool to run PromptML scripts'
+readme = "README.md"
+requires-python = ">=3.8"
+license = "MIT"
+keywords = [
+  "artificial-intelligence",
+  "prompt-engineering",
+  "dsl",
+  "language",
+  "generative-ai",
+  "promptml",
+  "promptml-cli",
+]
+authors = [
+  { name = "Vidura Labs Inc.", email = "contact@vidura.ai" },
+  { name = "Naren Yellavula", email = "naren.yellavula@gmail.com" },
+]
+
+classifiers = [
+  "Development Status :: 4 - Beta",
+  "Programming Language :: Python",
+  "Programming Language :: Python :: 3.8",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
+  "Programming Language :: Python :: Implementation :: CPython",
+  "Programming Language :: Python :: Implementation :: PyPy",
+]
+
+dependencies = [
+  "promptml==0.6.1",
+  "openai==1.33.0",
+  "rich==13.7.1",
+  "google-generativeai==0.6.0",
+]
+
+[project.urls]
+Documentation = "https://github.com/narenaryan/promptml-cli/blob/main/README.md"
+Issues = "https://github.com/narenaryan/promptml-cli/issues"
+Source = "https://github.com/narenaryan/promptml-cli/"
+
+[tool.hatch.version]
+path = "src/promptml_cli/__about__.py"
+
+[tool.hatch.envs.default]
+dependencies = ["coverage[toml]>=6.5", "pytest"]
+[tool.hatch.envs.default.scripts]
+test = "pytest {args:tests}"
+test-cov = "coverage run -m pytest {args:tests}"
+cov-report = ["- coverage combine", "coverage report"]
+cov = ["test-cov", "cov-report"]
+
+[[tool.hatch.envs.all.matrix]]
+python = ["3.8", "3.9", "3.10", "3.11", "3.12"]
+
+[tool.hatch.envs.types]
+dependencies = ["mypy>=1.0.0"]
+[tool.hatch.envs.types.scripts]
+check = "mypy --install-types --non-interactive {args:src/promptml_cli tests}"
+
+[tool.coverage.run]
+source_pkgs = ["promptml_cli", "tests"]
+branch = true
+parallel = true
+omit = ["src/promptml_cli/__about__.py"]
+
+[tool.coverage.paths]
+promptml-cli = ["src/promptml_cli"]
+tests = ["tests", "*/promptml_cli/tests"]
+
+[tool.coverage.report]
+exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"]
+
+[project.scripts]
+promptml-cli = "promptml_cli.main:run"
+
+[tool.hatch.build.targets.wheel]
+packages = ["src/promptml_cli"]
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..68ca9bd
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+promptml==0.6.1
+openai==1.33.0
+rich==13.7.1
+google-generativeai==0.6.0
diff --git a/src/promptml_cli/__about__.py b/src/promptml_cli/__about__.py
new file mode 100644
index 0000000..2532151
--- /dev/null
+++ b/src/promptml_cli/__about__.py
@@ -0,0 +1,2 @@
+# pylint: disable=invalid-name
+version = "0.1.0"
diff --git a/src/promptml_cli/__init__.py b/src/promptml_cli/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/promptml_cli/client.py b/src/promptml_cli/client.py
new file mode 100644
index 0000000..a8047e0
--- /dev/null
+++ b/src/promptml_cli/client.py
@@ -0,0 +1,31 @@
+import os
+import enum
+from typing import Union
+
+from openai import OpenAI
+import google.generativeai as genai
+
+
+class Provider(enum.Enum):
+    OPENAI = "openai"
+    GOOGLE = "google"
+
+class ClientFactory:
+    def __init__(self, provider: str, model: str = ""):
+        self.provider = provider
+        self.model = model
+
+    def get_client(self) -> Union[OpenAI, genai.GenerativeModel, None]:
+        if self.provider == Provider.OPENAI.value:
+            return OpenAI(
+                api_key=os.environ.get("OPENAI_API_KEY"),
+            )
+        elif self.provider == Provider.GOOGLE.value:
+            genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
+            return genai.GenerativeModel(self.model)
+        else:
+            return None
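+
+# Usage sketch (illustrative):
+#   client = ClientFactory(Provider.OPENAI.value, model="gpt-4o").get_client()
+#   # -> an OpenAI client; Provider.GOOGLE.value yields a genai.GenerativeModel instead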
diff --git a/src/promptml_cli/main.py b/src/promptml_cli/main.py
new file mode 100644
index 0000000..cddcf09
--- /dev/null
+++ b/src/promptml_cli/main.py
@@ -0,0 +1,83 @@
+import argparse
+import time
+
+from openai import APIConnectionError
+from promptml.parser import PromptParserFromFile
+from rich.markdown import Markdown
+from rich.console import Console
+
+from promptml_cli.client import Provider, ClientFactory
+
+def run():
+    # Take user input of the following arguments:
+    # 1. --file, -f : Path to the PromptML file
+    # 2. --model, -m : Model to use for the completion
+    # 3. --serializer, -s : Serializer to use for the completion
+    console = Console(
+        color_system="truecolor"
+    )
+    arg_parser = argparse.ArgumentParser(
+        prog='promptml-cli',
+        description='A Command Line Interface tool to run PromptML files with popular Generative AI models',
+        epilog='-----------------------------'
+    )
+
+    arg_parser.add_argument('-f', '--file', type=str, help='Path to the PromptML(.pml) file', required=True)
+    arg_parser.add_argument('-m', '--model', type=str, help='Model to use for the completion', default='gpt-4o')
+    arg_parser.add_argument('-s', '--serializer', type=str, help='Serializer to use for the completion. Default is `xml`', default='xml', choices=['xml', 'json', 'yaml'])
+    arg_parser.add_argument('-p', '--provider', type=str, help='GenAI provider to use for the completion. Default is `openai`', default=Provider.OPENAI.value, choices=[Provider.OPENAI.value, Provider.GOOGLE.value])
+
+
+    args = arg_parser.parse_args()
+
+    # Parse the PromptML file
+
+    parser = PromptParserFromFile(args.file)
+    parser.parse()
+
+    serialized_data = None
+
+    if args.serializer == "xml":
+        serialized_data = parser.to_xml()
+    elif args.serializer == "json":
+        serialized_data = parser.to_json()
+    elif args.serializer == "yaml":
+        serialized_data = parser.to_yaml()
+    else:
+        serialized_data = parser.to_xml()
+
+
+    now = time.time()
+    response = ""
+    if args.provider == Provider.GOOGLE.value:
+        if args.model == "gpt-4o":
+            args.model = "gemini-1.5-flash-latest"
+
+        g_client = ClientFactory(Provider.GOOGLE.value, model=args.model).get_client()
+        response = g_client.generate_content(serialized_data).text
+    elif args.provider == Provider.OPENAI.value:
+        openai_client = ClientFactory(Provider.OPENAI.value, model=args.model).get_client()
+        try:
+            chat_completion = openai_client.chat.completions.create(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": serialized_data,
+                    },
+                ],
+                model=args.model,
+            )
+            response = chat_completion.choices[0].message.content
+        except APIConnectionError:
+            console.print(
+                "Error connecting to OpenAI API. Try again!",
+                style="bold red"
+            )
+            return
+
+    # Print the completion with rich console
+    console.print(Markdown(response), soft_wrap=True, new_line_start=True)
+    console.print(f"Time taken: {time.time() - now} seconds", style="bold green")
+
+if __name__ == '__main__':
+    run()
diff --git a/tests/.gitkeep b/tests/.gitkeep
new file mode 100644
index 0000000..e69de29