Commit d8cc4f2

v4.2.0

AstraBert committed Jun 18, 2024
1 parent 7ec28b7 commit d8cc4f2

Showing 5 changed files with 49 additions and 6 deletions.
3 changes: 2 additions & 1 deletion README.md
@@ -6,7 +6,7 @@
<img src="https://img.shields.io/github/languages/top/AstraBert/everything-ai" alt="GitHub top language">
<img src="https://img.shields.io/github/commit-activity/t/AstraBert/everything-ai" alt="GitHub commit activity">
<img src="https://img.shields.io/badge/everything_ai-stable-green" alt="Static Badge">
<img src="https://img.shields.io/badge/Release-v4.1.0-purple" alt="Static Badge">
<img src="https://img.shields.io/badge/Release-v4.2.0-purple" alt="Static Badge">
<img src="https://img.shields.io/docker/image-size/astrabert/everything-ai" alt="Docker image size">
<img src="https://img.shields.io/badge/Supported_platforms-Windows/macOS-brown" alt="Static Badge">
@@ -74,6 +74,7 @@ Choose the task among:
- *llama.cpp-and-qdrant*: same as *retrieval-text-generation*, but uses **llama.cpp** as the inference engine, so you MUST NOT specify a model - **MULTILINGUAL**
- *build-your-llm*: Build a customizable chat LLM combining a Qdrant database with your PDFs and the power of Anthropic, OpenAI, Cohere or Groq models: you just need an API key! To build the Qdrant database, you have to pass either a PDF/a bunch of PDFs specified as comma-separated paths or a directory where all the PDFs of interest are stored (**DO NOT** provide both; see the input example below); you can also specify the language in which the PDFs are written, using [ISO nomenclature](https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes) - **MULTILINGUAL**, **LANGFUSE INTEGRATION**
- *simply-chatting*: Build a customizable chat LLM with the power of Anthropic, OpenAI, Cohere or Groq models (no RAG pipeline): you just need an API key! - **MULTILINGUAL**, **LANGFUSE INTEGRATION**
- *fal-img2img*: Use the [fal.ai](https://fal.ai) ComfyUI API to generate images starting from your PNG and JPEG images: you just need an API key! You can also customize the generation by working with prompts and seeds - **ENGLISH ONLY**
- *image-retrieval-search*: search an image database by uploading a folder as database input. The folder should have the following structure:

```
...
```
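For the PDF-based tasks above, a hedged illustration of the two accepted input shapes (the paths are hypothetical; the `pdff`/`dirs` names mirror `build_command` in `docker/select_and_run.py` further down; provide one or the other, never both):

```python
# Either a comma-separated list of PDF paths...
pdff = "papers/attention.pdf,papers/bert.pdf"
# ...or a single directory holding every PDF of interest (not both at once)
dirs = "papers/"
```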
2 changes: 2 additions & 0 deletions docker/Dockerfile
@@ -7,6 +7,8 @@ WORKDIR /app
# Add the current directory contents into the container at /app
ADD . /app

RUN pip install fal_client

# Expose the port that the application will run on
EXPOSE 8760

39 changes: 39 additions & 0 deletions docker/fal_img2img.py
@@ -0,0 +1,39 @@
import asyncio
import base64
import os

import fal_client
import gradio as gr
from PIL import Image

MAP_EXTS = {"jpg": "jpeg", "jpeg": "jpeg", "png": "png"}

async def submit(image_path, prompt, seed):
    # Take the last dot-separated chunk so temp paths with extra dots still work
    ext = image_path.split(".")[-1].lower()
    # A data URI must carry the base64-encoded file content, not the file path
    with open(image_path, "rb") as f:
        b64_image = base64.b64encode(f.read()).decode("utf-8")
    handler = await fal_client.submit_async(
        "comfy/astrabert/image2image",
        arguments={
            "ksampler_seed": seed,
            "cliptextencode_text": prompt,
            "image_load_image_path": f"data:image/{MAP_EXTS[ext]};base64,{b64_image}",
        },
    )
    result = await handler.get()
    return result

def get_url(results):
    # The workflow reply nests images under the first output node
    first_output = results["outputs"][list(results["outputs"].keys())[0]]
    url = first_output["images"][0]["url"]
    nm = first_output["images"][0]["filename"]
    return f"![{nm}]({url})"


def render_image(api_key, image_path, prompt, seed):
    os.environ["FAL_KEY"] = api_key
    results = asyncio.run(submit(image_path, prompt, int(seed)))
    url = get_url(results)
    img = Image.open(image_path)
    return img, url


demo = gr.Interface(
    render_image,
    inputs=[
        gr.Textbox(label="API key", type="password", value="fal-******************"),
        gr.File(label="PNG/JPEG Image"),
        gr.Textbox(label="Prompt", info="Specify how you would like the image generation to be"),
        gr.Textbox(label="Seed", info="Pass your seed here (if not interested, leave it as it is)", value="123498235498246"),
    ],
    outputs=[gr.Image(label="Your Base Image"), gr.Markdown(label="Generated Image")],
    title="everything-ai-img2img",
)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
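A minimal sketch of exercising the new module outside Gradio, reusing its own `submit` and `get_url` helpers; the key and image path below are placeholders:

```python
import asyncio
import os

os.environ["FAL_KEY"] = "fal-************"  # placeholder fal.ai API key

# Run the img2img workflow on a local PNG and print the markdown image link
result = asyncio.run(submit("photo.png", "a watercolor rendition of this photo", 42))
print(get_url(result))
```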
11 changes: 6 additions & 5 deletions docker/select_and_run.py
@@ -1,11 +1,11 @@
import subprocess as sp
import gradio as gr

TASK_TO_SCRIPT = {"retrieval-text-generation": "retrieval_text_generation.py", "agnostic-text-generation": "agnostic_text_generation.py", "text-summarization": "text_summarization.py", "image-generation": "image_generation.py", "image-generation-pollinations": "image_generation_pollinations.py", "image-classification": "image_classification.py", "image-to-text": "image_to_text.py", "retrieval-image-search": "retrieval_image_search.py", "protein-folding": "protein_folding_with_esm.py", "video-generation": "video_generation.py", "speech-recognition": "speech_recognition.py", "spaces-api-supabase": "spaces_api_supabase.py", "audio-classification": "audio_classification.py", "autotrain": "autotrain_interface.py", "llama.cpp-and-qdrant": "llama_cpp_int.py", "build-your-llm": "build_your_llm.py", "simply-chatting": "chat_your_llm.py"}
TASK_TO_SCRIPT = {"retrieval-text-generation": "retrieval_text_generation.py", "agnostic-text-generation": "agnostic_text_generation.py", "text-summarization": "text_summarization.py", "image-generation": "image_generation.py", "image-generation-pollinations": "image_generation_pollinations.py", "image-classification": "image_classification.py", "image-to-text": "image_to_text.py", "retrieval-image-search": "retrieval_image_search.py", "protein-folding": "protein_folding_with_esm.py", "video-generation": "video_generation.py", "speech-recognition": "speech_recognition.py", "spaces-api-supabase": "spaces_api_supabase.py", "audio-classification": "audio_classification.py", "autotrain": "autotrain_interface.py", "llama.cpp-and-qdrant": "llama_cpp_int.py", "build-your-llm": "build_your_llm.py", "simply-chatting": "chat_your_llm.py", "fal-img2img": "fal_img2img.py"}


def build_command(tsk, mod="None", pdff="None", dirs="None", lan="None", imdim="512", gradioclient="None", supabaseurl="None", collectname="None", supenc="all-MiniLM-L6-v2", supdim="384"):
    if tsk != "retrieval-text-generation" and tsk != "image-generation-pollinations" and tsk != "retrieval-image-search" and tsk != "autotrain" and tsk != "protein-folding" and tsk != "spaces-api-supabase" and tsk != "llama.cpp-and-qdrant" and tsk != "build-your-llm":
    if tsk != "retrieval-text-generation" and tsk != "image-generation-pollinations" and tsk != "retrieval-image-search" and tsk != "autotrain" and tsk != "protein-folding" and tsk != "spaces-api-supabase" and tsk != "llama.cpp-and-qdrant" and tsk != "build-your-llm" and tsk != "simply-chatting" and tsk != "fal-img2img":
        sp.run(f"python3 {TASK_TO_SCRIPT[tsk]} -m {mod}", shell=True)
        return f"python3 {TASK_TO_SCRIPT[tsk]} -m {mod}"
    elif tsk == "retrieval-text-generation":
@@ -14,7 +14,7 @@ def build_command(tsk, mod="None", pdff="None", dirs="None", lan="None", imdim="
    elif tsk == "llama.cpp-and-qdrant" or tsk == "build-your-llm":
        sp.run(f"python3 {TASK_TO_SCRIPT[tsk]} -pf '{pdff}' -d '{dirs}' -l '{lan}'", shell=True)
        return f"python3 {TASK_TO_SCRIPT[tsk]} -pf '{pdff}' -d '{dirs}' -l '{lan}'"
    elif tsk == "image-generation-pollinations" or tsk == "autotrain" or tsk == "protein-folding" or tsk == "simply-chatting":
    elif tsk == "image-generation-pollinations" or tsk == "autotrain" or tsk == "protein-folding" or tsk == "simply-chatting" or tsk == "fal-img2img":
        sp.run(f"python3 {TASK_TO_SCRIPT[tsk]}", shell=True)
        return f"python3 {TASK_TO_SCRIPT[tsk]}"
    elif tsk == "spaces-api-supabase":
@@ -34,7 +34,7 @@ def build_command(tsk, mod="None", pdff="None", dirs="None", lan="None", imdim="
            label="Task",
            info="Task you want your assistant to help you with",
            lines=3,
            value=f"Choose one of the following: {','.join(list(TASK_TO_SCRIPT.keys()))}; if you choose 'image-generation-pollinations' or 'autotrain' or 'protein-folding' or 'simply-chatting', you do not need to specify anything else. If you choose 'spaces-api-supabase' you need to specify the Spaces API client, the database URL, the collection name, the Sentence-Transformers encoder used to upload the vectors to the Supabase database and the vectors size (optionally also the language)",
            value=f"Choose one of the following: {','.join(list(TASK_TO_SCRIPT.keys()))}; if you choose 'image-generation-pollinations' or 'autotrain' or 'protein-folding' or 'simply-chatting' or 'fal-img2img', you do not need to specify anything else. If you choose 'spaces-api-supabase' you need to specify the Spaces API client, the database URL, the collection name, the Sentence-Transformers encoder used to upload the vectors to the Supabase database and the vectors size (optionally also the language)",
        ),
        gr.Textbox(
            label="Model",
@@ -98,7 +98,8 @@ def build_command(tsk, mod="None", pdff="None", dirs="None", lan="None", imdim="
        ),
    ],
    outputs="textbox",
    theme=gr.themes.Base()
    theme=gr.themes.Base(),
    title="everything-ai"
)
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=8760, share=False)
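As a sanity check on the dispatcher change: fal-img2img now falls into the no-extra-arguments branch, so `build_command` launches the script via subprocess and echoes the bare command. A sketch using the function above:

```python
# Hypothetical check: fal-img2img needs no model/PDF/language flags,
# so the dispatcher starts fal_img2img.py and returns the command it ran
cmd = build_command("fal-img2img")
assert cmd == "python3 fal_img2img.py"
```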
Binary file modified imgs/everything-ai.drawio.png
