machine #62 by bartdevelop3r - opened
This view is limited to 50 files because it contains too many changes.
See the raw diff here.
- .dockerignore +0 -67
- .gitattributes +35 -1
- .gitignore +105 -37
- .pre-commit-config.yaml +32 -0
- .python-version +1 -0
- .vscode/extensions.json +8 -0
- .vscode/settings.json +16 -0
- Dockerfile +0 -105
- README.md +9 -133
- app.py +54 -0
- app_allenai.py +67 -0
- app_cerebras.py +19 -0
- app_claude.py +21 -0
- app_cohere.py +21 -0
- app_compare.py +210 -0
- app_crew.py +8 -0
- app_deepseek.py +23 -0
- app_experimental.py +300 -0
- app_fal.py +16 -0
- app_fireworks.py +19 -0
- app_gemini.py +22 -0
- app_gemini_camera.py +23 -0
- app_gemini_coder.py +23 -0
- app_gemini_voice.py +23 -0
- app_groq.py +21 -0
- app_groq_coder.py +23 -0
- app_hf.py +17 -0
- app_huggingface.py +22 -0
- app_hyperbolic.py +19 -0
- app_hyperbolic_coder.py +20 -0
- app_langchain.py +23 -0
- app_lumaai.py +7 -0
- app_marco_o1.py +12 -0
- app_meta.py +6 -0
- app_mindsearch.py +12 -0
- app_minimax.py +22 -0
- app_minimax_coder.py +23 -0
- app_mistral.py +23 -0
- app_moondream.py +13 -0
- app_nvidia.py +22 -0
- app_nvidia_coder.py +23 -0
- app_omini.py +10 -0
- app_openai.py +21 -0
- app_openai_coder.py +22 -0
- app_openai_voice.py +23 -0
- app_openrouter.py +22 -0
- app_paligemma.py +78 -0
- app_perplexity.py +23 -0
- app_playai.py +10 -0
- app_qwen.py +19 -0
.dockerignore
DELETED
@@ -1,67 +0,0 @@
-# Python
-__pycache__/
-*.py[cod]
-*$py.class
-*.so
-.Python
-venv/
-.venv/
-ENV/
-env/
-*.egg-info/
-dist/
-build/
-
-# Node
-node_modules/
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-.pnpm-debug.log*
-
-# Next.js
-frontend/.next/
-frontend/out/
-frontend/build/
-
-# Git
-.git/
-.gitignore
-
-# IDE
-.vscode/
-.idea/
-*.swp
-*.swo
-*~
-
-# OS
-.DS_Store
-Thumbs.db
-
-# Documentation
-*.md
-!README.md
-
-# Docker
-Dockerfile*
-docker-compose*.yml
-.dockerignore
-
-# Logs
-*.log
-logs/
-log/
-
-# Generated
-generated_projects/
-
-# Tests
-test/
-tests/
-__tests__/
-
-# Lock files (will be regenerated)
-uv.lock
-poetry.lock
-
.gitattributes
CHANGED
@@ -1 +1,35 @@
-
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1,3 +1,5 @@
+.gradio/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -14,26 +16,21 @@ dist/
 downloads/
 eggs/
 .eggs/
-
-/
-/lib64/
-venv/lib/
-venv/lib64/
+lib/
+lib64/
 parts/
 sdist/
 var/
+wheels/
+share/python-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
 MANIFEST

-# Virtual environments
-venv/
-env/
-ENV/
-.venv/
-
 # PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
 *.manifest
 *.spec

@@ -51,44 +48,115 @@ htmlcov/
 nosetests.xml
 coverage.xml
 *.cover
+*.py,cover
 .hypothesis/
 .pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/

 # Jupyter Notebook
 .ipynb_checkpoints

+# IPython
+profile_default/
+ipython_config.py
+
 # pyenv
-
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site

 # mypy
 .mypy_cache/
 .dmypy.json
+dmypy.json

 # Pyre type checker
 .pyre/

-#
-
-
-
-
-
-
-.
-.
-.
-.
-
-.fastrtc_docs_last_update.txt
-
-# System files
-.DS_Store
-Thumbs.db
-
-# Lock files
-uv.lock
-poetry.lock
-Pipfile.lock
-
-# VSCode
-.vscode/
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,32 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.8.6
+    hooks:
+      - id: ruff
+        args: ["--fix"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.14.1
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-requests",
+            "types-PyYAML",
+            "types-pytz",
+          ]
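With this config committed, the hooks can be enabled locally by installing the `pre-commit` package and running `pre-commit install` once; `pre-commit run --all-files` then applies every hook across the repository. (Standard pre-commit usage, not shown in this diff.)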
.python-version
ADDED
@@ -0,0 +1 @@
+3.10
.vscode/extensions.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "recommendations": [
+    "ms-python.python",
+    "charliermarsh.ruff",
+    "streetsidesoftware.code-spell-checker",
+    "tamasfe.even-better-toml"
+  ]
+}
.vscode/settings.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
+  "[python]": {
+    "editor.defaultFormatter": "charliermarsh.ruff",
+    "editor.formatOnType": true,
+    "editor.codeActionsOnSave": {
+      "source.fixAll.ruff": "explicit"
+    }
+  },
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "notebook.output.scrolling": true,
+  "notebook.formatOnSave.enabled": true
+}
Dockerfile
DELETED
@@ -1,105 +0,0 @@
-# Multi-stage build for AnyCoder Docker Space
-
-# Stage 1: Build frontend
-FROM node:22-slim AS frontend-builder
-
-WORKDIR /build
-
-# Copy frontend package files
-COPY frontend/package*.json ./
-RUN npm ci
-
-# Copy all frontend source files and configs
-COPY frontend/src ./src
-COPY frontend/public ./public
-COPY frontend/next.config.js ./
-COPY frontend/tsconfig.json ./
-COPY frontend/tailwind.config.js ./
-COPY frontend/postcss.config.js ./
-# Note: next-env.d.ts is auto-generated by Next.js, not needed for build
-
-# Build frontend
-RUN npm run build
-
-# Stage 2: Production image
-FROM python:3.11-slim
-
-# Install system dependencies as root (git for pip, nodejs for frontend)
-# Install Node.js 22 from NodeSource (Debian repo only has v18)
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends curl ca-certificates gnupg git && \
-    curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \
-    apt-get install -y --no-install-recommends nodejs && \
-    rm -rf /var/lib/apt/lists/*
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -m -u 1000 user
-
-# Switch to the "user" user
-USER user
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
-    PATH=/home/user/.local/bin:$PATH \
-    PYTHONUNBUFFERED=1
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Copy Python requirements and install dependencies
-COPY --chown=user:user requirements.txt .
-RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir -r requirements.txt
-
-# Copy application code
-COPY --chown=user:user backend_api.py .
-COPY --chown=user:user backend_models.py .
-COPY --chown=user:user backend_docs_manager.py .
-COPY --chown=user:user backend_prompts.py .
-COPY --chown=user:user backend_parsers.py .
-COPY --chown=user:user backend_deploy.py .
-COPY --chown=user:user backend_search_replace.py .
-COPY --chown=user:user project_importer.py .
-
-# Copy built frontend from builder stage
-COPY --chown=user:user --from=frontend-builder /build/.next ./frontend/.next
-COPY --chown=user:user --from=frontend-builder /build/public ./frontend/public
-COPY --chown=user:user --from=frontend-builder /build/package*.json ./frontend/
-COPY --chown=user:user --from=frontend-builder /build/next.config.js ./frontend/
-COPY --chown=user:user --from=frontend-builder /build/node_modules ./frontend/node_modules
-
-# Set environment variables for the application
-# BACKEND_HOST is used by Next.js server for proxying
-# Do NOT set NEXT_PUBLIC_API_URL - let frontend use relative URLs
-ENV BACKEND_HOST=http://localhost:8000 \
-    PORT=7860
-
-# Create startup script that runs both services
-# Backend on 8000, Frontend on 7860 (exposed port)
-RUN echo '#!/bin/bash\n\
-set -e\n\
-\n\
-echo "🚀 Starting AnyCoder Docker Space..."\n\
-\n\
-# Start backend on port 8000 in background\n\
-echo "📡 Starting FastAPI backend on port 8000..."\n\
-cd $HOME/app\n\
-uvicorn backend_api:app --host 0.0.0.0 --port 8000 &\n\
-BACKEND_PID=$!\n\
-\n\
-# Wait for backend to be ready\n\
-echo "⏳ Waiting for backend to start..."\n\
-sleep 5\n\
-\n\
-# Start frontend on port 7860 (HF Spaces exposed port)\n\
-echo "🎨 Starting Next.js frontend on port 7860..."\n\
-cd $HOME/app/frontend\n\
-PORT=7860 BACKEND_HOST=http://localhost:8000 npm start\n\
-' > $HOME/app/start.sh && chmod +x $HOME/app/start.sh
-
-# Expose port 7860 (HF Spaces default)
-EXPOSE 7860
-
-# Run the startup script
-CMD ["./start.sh"]
-
README.md
CHANGED
@@ -1,137 +1,13 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk:
-
+title: Anycoder
+emoji: 🏢
+colorFrom: indigo
+colorTo: indigo
+sdk: gradio
+sdk_version: 5.23.3
+app_file: app.py
 pinned: false
-disable_embedding:
-hf_oauth: true
-hf_oauth_expiration_minutes: 43200
-hf_oauth_scopes:
-  - manage-repos
-  - write-discussions
+disable_embedding: true
 ---

-
-# AnyCoder - AI Code Generator with React Frontend
-
-AnyCoder is a full-stack AI-powered code generator with a modern React/TypeScript frontend and FastAPI backend. Generate applications by describing them in plain English, with support for multiple AI models and one-click deployment to Hugging Face Spaces.
-
-## 🎨 Features
-
-- **Modern React UI**: Apple-inspired design with VS Code layout
-- **Real-time Streaming**: Server-Sent Events for live code generation
-- **Multi-Model Support**: MiniMax M2, DeepSeek V3, and more via HuggingFace InferenceClient
-- **Multiple Languages**: HTML, Gradio, Streamlit, React, Transformers.js, ComfyUI
-- **Authentication**: HuggingFace OAuth + Dev mode for local testing
-- **One-Click Deployment**: Deploy generated apps directly to HF Spaces
-
-## 🏗️ Architecture
-
-```
-anycoder/
-├── backend_api.py        # FastAPI backend with streaming
-├── frontend/             # Next.js React frontend
-│   ├── src/
-│   │   ├── app/          # Pages (page.tsx, layout.tsx, globals.css)
-│   │   ├── components/   # React components
-│   │   ├── lib/          # API client, auth utilities
-│   │   └── types/        # TypeScript types
-│   └── package.json
-├── requirements.txt      # Python dependencies
-├── Dockerfile            # Docker Space configuration
-└── start_fullstack.sh    # Local development script
-```
-
-## 🚀 Quick Start
-
-### Local Development
-
-1. **Backend**:
-```bash
-export HF_TOKEN="your_huggingface_token"
-export GEMINI_API_KEY="your_gemini_api_key"
-python backend_api.py
-```
-
-2. **Frontend** (new terminal):
-```bash
-cd frontend
-npm install
-npm run dev
-```
-
-3. Open `http://localhost:3000`
-
-### Using start script:
-```bash
-export HF_TOKEN="your_token"
-export GEMINI_API_KEY="your_gemini_api_key"
-./start_fullstack.sh
-```
-
-## 🐳 Docker Space Deployment
-
-This app runs as a Docker Space on HuggingFace. The Dockerfile:
-- Builds the Next.js frontend
-- Runs FastAPI backend on port 7860
-- Uses proper user permissions (UID 1000)
-- Handles environment variables securely
-
-## 🔑 Authentication
-
-- **Dev Mode** (localhost): Mock login for testing
-- **Production**: HuggingFace OAuth with manage-repos scope
-
-## 📝 Supported Languages
-
-- `html` - Static HTML pages
-- `gradio` - Python Gradio apps
-- `streamlit` - Python Streamlit apps
-- `react` - React/Next.js apps
-- `transformers.js` - Browser ML apps
-- `comfyui` - ComfyUI workflows
-
-## 🤖 Available Models
-
-- **Gemini 3 Pro Preview** (Default) - Google's latest with deep thinking & Google Search
-- MiniMax M2 (via HF router with Novita)
-- DeepSeek V3/V3.1
-- DeepSeek R1
-- And more via HuggingFace InferenceClient
-
-## 🎯 Usage
-
-1. Sign in with HuggingFace (or use Dev Login locally)
-2. Select a language and AI model
-3. Describe your app in the chat
-4. Watch code generate in real-time
-5. Click **🚀 Deploy** to publish to HF Spaces
-
-## 🛠️ Environment Variables
-
-- `HF_TOKEN` - HuggingFace API token (required)
-- `GEMINI_API_KEY` - Google Gemini API key (required for Gemini 3 Pro Preview)
-- `POE_API_KEY` - Poe API key (optional, for GPT-5 and Claude models)
-- `DASHSCOPE_API_KEY` - DashScope API key (optional, for Qwen models)
-- `OPENROUTER_API_KEY` - OpenRouter API key (optional, for Sherlock models)
-- `MISTRAL_API_KEY` - Mistral API key (optional, for Mistral models)
-
-## 📦 Tech Stack
-
-**Frontend:**
-- Next.js 14
-- TypeScript
-- Tailwind CSS
-- Monaco Editor
-
-**Backend:**
-- FastAPI
-- HuggingFace Hub
-- Server-Sent Events (SSE)
-
-## 📄 License
-
-MIT
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,54 @@
+from app_huggingface import demo as demo_huggingface
+from app_gemini_coder import demo as demo_gemini
+from utils import get_app
+import gradio as gr
+
+# Create mapping of providers to their code snippets
+PROVIDER_SNIPPETS = {
+    "Hugging Face": """
+import gradio as gr
+import ai_gradio
+gr.load(
+    name='huggingface:deepseek-ai/DeepSeek-R1',
+    src=ai_gradio.registry,
+    coder=True,
+    provider="together"
+).launch()""",
+    "Gemini Coder": """
+import gradio as gr
+import ai_gradio
+gr.load(
+    name='gemini:gemini-2.5-pro-exp-03-25',
+    src=ai_gradio.registry,
+    coder=True,
+    provider="together"
+).launch()
+""",
+}
+# Create mapping of providers to their demos
+PROVIDERS = {
+    "Hugging Face": demo_huggingface,
+    "Gemini Coder": demo_gemini,
+}
+
+# Modified get_app implementation
+demo = gr.Blocks()
+with demo:
+
+    provider_dropdown = gr.Dropdown(choices=list(PROVIDERS.keys()), value="Hugging Face", label="Select code snippet")
+    code_display = gr.Code(label="Provider Code Snippet", language="python", value=PROVIDER_SNIPPETS["Hugging Face"])
+
+    def update_code(provider):
+        return PROVIDER_SNIPPETS.get(provider, "Code snippet not available")
+
+    provider_dropdown.change(fn=update_code, inputs=[provider_dropdown], outputs=[code_display])
+
+    selected_demo = get_app(
+        models=list(PROVIDERS.keys()),
+        default_model="Hugging Face",
+        src=PROVIDERS,
+        dropdown_label="Select Provider",
+    )
+
+if __name__ == "__main__":
+    demo.queue(api_open=False).launch(show_api=False)
app_allenai.py
ADDED
@@ -0,0 +1,67 @@
+import gradio as gr
+from gradio_client import Client
+
+MODELS = {"OLMo-2-1124-13B-Instruct": "akhaliq/olmo-anychat", "Llama-3.1-Tulu-3-8B": "akhaliq/allen-test"}
+
+
+def create_chat_fn(client):
+    def chat(message, history):
+        response = client.predict(
+            message=message,
+            system_prompt="You are a helpful AI assistant.",
+            temperature=0.7,
+            max_new_tokens=1024,
+            top_k=40,
+            repetition_penalty=1.1,
+            top_p=0.95,
+            api_name="/chat",
+        )
+        return response
+
+    return chat
+
+
+def set_client_for_session(model_name, request: gr.Request):
+    headers = {}
+    if request and hasattr(request, "request") and hasattr(request.request, "headers"):
+        x_ip_token = request.request.headers.get("x-ip-token")
+        if x_ip_token:
+            headers["X-IP-Token"] = x_ip_token
+
+    return Client(MODELS[model_name], headers=headers)
+
+
+def safe_chat_fn(message, history, client):
+    if client is None:
+        return "Error: Client not initialized. Please refresh the page."
+    return create_chat_fn(client)(message, history)
+
+
+with gr.Blocks() as demo:
+    client = gr.State()
+
+    model_dropdown = gr.Dropdown(
+        choices=list(MODELS.keys()), value="OLMo-2-1124-13B-Instruct", label="Select Model", interactive=True
+    )
+
+    chat_interface = gr.ChatInterface(fn=safe_chat_fn, additional_inputs=[client])
+
+    # Update client when model changes
+    def update_model(model_name, request):
+        return set_client_for_session(model_name, request)
+
+    model_dropdown.change(
+        fn=update_model,
+        inputs=[model_dropdown],
+        outputs=[client],
+    )
+
+    # Initialize client on page load
+    demo.load(
+        fn=set_client_for_session,
+        inputs=gr.State("OLMo-2-1124-13B-Instruct"),
+        outputs=client,
+    )
+
+if __name__ == "__main__":
+    demo.launch()
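For reference, a minimal standalone sketch of what app_allenai.py does per message: call the selected Space's /chat endpoint through gradio_client with the same arguments used above. The Space name and predict() parameters are copied from the file; treat this as an illustration, since it needs live access to that Space.

from gradio_client import Client

# Space name and predict() arguments taken from app_allenai.py above.
client = Client("akhaliq/olmo-anychat")
reply = client.predict(
    message="Hello!",
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(reply)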
app_cerebras.py
ADDED
@@ -0,0 +1,19 @@
+import os
+
+import cerebras_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "llama3.1-8b",
+        "llama3.1-70b",
+        "llama3.1-405b",
+    ],
+    default_model="llama3.1-70b",
+    src=cerebras_gradio.registry,
+    accept_token=not os.getenv("CEREBRAS_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_claude.py
ADDED
@@ -0,0 +1,21 @@
+import os
+
+import anthropic_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "claude-3-5-sonnet-20241022",
+        "claude-3-5-haiku-20241022",
+        "claude-3-opus-20240229",
+        "claude-3-sonnet-20240229",
+        "claude-3-haiku-20240307",
+    ],
+    default_model="claude-3-5-sonnet-20241022",
+    src=anthropic_gradio.registry,
+    accept_token=not os.getenv("ANTHROPIC_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_cohere.py
ADDED
@@ -0,0 +1,21 @@
+import os
+
+import cohere_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "command-r",
+        "command-r-08-2024",
+        "command-r-plus",
+        "command-r-plus-08-2024",
+        "command-r7b-12-2024",
+    ],
+    default_model="command-r7b-12-2024",
+    src=cohere_gradio.registry,
+    accept_token=not os.getenv("COHERE_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_compare.py
ADDED
@@ -0,0 +1,210 @@
+import os
+
+import google.generativeai as genai
+import gradio as gr
+import openai
+from anthropic import Anthropic
+from openai import OpenAI  # Add explicit OpenAI import
+
+
+def get_all_models():
+    """Get all available models from the registries."""
+    return [
+        "SambaNova: Meta-Llama-3.2-1B-Instruct",
+        "SambaNova: Meta-Llama-3.2-3B-Instruct",
+        "SambaNova: Llama-3.2-11B-Vision-Instruct",
+        "SambaNova: Llama-3.2-90B-Vision-Instruct",
+        "SambaNova: Meta-Llama-3.1-8B-Instruct",
+        "SambaNova: Meta-Llama-3.1-70B-Instruct",
+        "SambaNova: Meta-Llama-3.1-405B-Instruct",
+        "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
+        "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
+        "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
+        "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
+        "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
+    ]
+
+
+def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
+    """Generate a prompt for models to discuss and build upon previous
+    responses.
+    """
+    prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
+
+Previous responses from other AI models:
+{chr(10).join(f"- {response}" for response in previous_responses)}
+
+Please provide your perspective while:
+1. Acknowledging key insights from previous responses
+2. Adding any missing important points
+3. Respectfully noting if you disagree with anything and explaining why
+4. Building towards a complete answer
+
+Keep your response focused and concise (max 3-4 paragraphs)."""
+    return prompt
+
+
+def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
+    """Generate a prompt for final consensus building."""
+    return f"""Review this multi-AI discussion about: "{original_question}"
+
+Discussion history:
+{chr(10).join(discussion_history)}
+
+As a final synthesizer, please:
+1. Identify the key points where all models agreed
+2. Explain how any disagreements were resolved
+3. Present a clear, unified answer that represents our collective best understanding
+4. Note any remaining uncertainties or caveats
+
+Keep the final consensus concise but complete."""
+
+
+def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
+    import openai
+
+    client = openai.OpenAI(api_key=api_key)
+    response = client.chat.completions.create(model=model, messages=messages)
+    return response.choices[0].message.content
+
+
+def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
+    """Chat with Anthropic's Claude model."""
+    client = Anthropic(api_key=api_key)
+    response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
+    return response.content[0].text
+
+
+def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
+    """Chat with Gemini Pro model."""
+    genai.configure(api_key=api_key)
+    model = genai.GenerativeModel("gemini-pro")
+
+    # Convert messages to Gemini format
+    gemini_messages = []
+    for msg in messages:
+        role = "user" if msg["role"] == "user" else "model"
+        gemini_messages.append({"role": role, "parts": [msg["content"]]})
+
+    response = model.generate_content([m["parts"][0] for m in gemini_messages])
+    return response.text
+
+
+def chat_with_sambanova(
+    messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
+) -> str:
+    """Chat with SambaNova's models using their OpenAI-compatible API."""
+    client = openai.OpenAI(
+        api_key=api_key,
+        base_url="https://api.sambanova.ai/v1",
+    )
+
+    response = client.chat.completions.create(
+        model=model_name,  # Use the specific model name passed in
+        messages=messages,
+        temperature=0.1,
+        top_p=0.1,
+    )
+    return response.choices[0].message.content
+
+
+def chat_with_hyperbolic(
+    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+) -> str:
+    """Chat with Hyperbolic's models using their OpenAI-compatible API."""
+    client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
+
+    # Add system message to the start of the messages list
+    full_messages = [
+        {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
+        *messages,
+    ]
+
+    response = client.chat.completions.create(
+        model=model_name,  # Use the specific model name passed in
+        messages=full_messages,
+        temperature=0.7,
+        max_tokens=1024,
+    )
+    return response.choices[0].message.content
+
+
+def multi_model_consensus(
+    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
+) -> list[tuple[str, str]]:
+    if not selected_models:
+        raise gr.Error("Please select at least one model to chat with.")
+
+    chat_history = []
+    progress(0, desc="Getting responses from all models...")
+
+    # Get a response from each selected model (one at a time)
+    for i, model in enumerate(selected_models):
+        provider, model_name = model.split(": ", 1)
+        progress((i + 1) / len(selected_models), desc=f"Getting response from {model}...")
+
+        try:
+            if provider == "Anthropic":
+                api_key = os.getenv("ANTHROPIC_API_KEY")
+                response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
+            elif provider == "SambaNova":
+                api_key = os.getenv("SAMBANOVA_API_KEY")
+                response = chat_with_sambanova(
+                    messages=[
+                        {"role": "system", "content": "You are a helpful assistant"},
+                        {"role": "user", "content": question},
+                    ],
+                    api_key=api_key,
+                    model_name=model_name,
+                )
+            elif provider == "Hyperbolic":
+                api_key = os.getenv("HYPERBOLIC_API_KEY")
+                response = chat_with_hyperbolic(
+                    messages=[{"role": "user", "content": question}],
+                    api_key=api_key,
+                    model_name=model_name,
+                )
+            else:  # Gemini
+                api_key = os.getenv("GEMINI_API_KEY")
+                response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
+
+            chat_history.append((model, response))
+        except Exception as e:
+            chat_history.append((model, f"Error: {e!s}"))
+
+    progress(1.0, desc="Done!")
+    return chat_history
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Model Response Comparison")
+    gr.Markdown("""Select multiple models to compare their responses""")
+
+    with gr.Row():
+        with gr.Column():
+            model_selector = gr.Dropdown(
+                choices=get_all_models(),
+                multiselect=True,
+                label="Select Models",
+                info="Choose models to compare",
+                value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
+            )
+
+    chatbot = gr.Chatbot(height=600, label="Model Responses")
+    msg = gr.Textbox(label="Prompt", placeholder="Ask a question to compare model responses...")
+
+    def respond(message, selected_models):
+        chat_history = multi_model_consensus(message, selected_models, rounds=1)
+        return chat_history
+
+    msg.submit(respond, [msg, model_selector], [chatbot])
+
+for fn in demo.fns.values():
+    fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
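app_compare.py (and app_experimental.py below) lean on one pattern: SambaNova and Hyperbolic expose OpenAI-compatible endpoints, so the same openai client works against either by swapping base_url and the API key. A minimal sketch of that pattern, with the model name and base URL copied from the file above and the key read from the environment:

import os

from openai import OpenAI

# Same client class, different backend: only base_url and api_key change per provider.
client = OpenAI(api_key=os.getenv("HYPERBOLIC_API_KEY"), base_url="https://api.hyperbolic.xyz/v1")
resp = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(resp.choices[0].message.content)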
app_crew.py
ADDED
@@ -0,0 +1,8 @@
+import ai_gradio
+import gradio as gr
+
+demo = gr.load(
+    name="crewai:gpt-4-turbo",
+    crew_type="article",  # or 'support'
+    src=ai_gradio.registry,
+)
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ai_gradio
|
| 2 |
+
|
| 3 |
+
from utils_ai_gradio import get_app
|
| 4 |
+
|
| 5 |
+
# Get the hyperbolic models but keep their full names for loading
|
| 6 |
+
DEEPSEEK_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("deepseek:")]
|
| 7 |
+
|
| 8 |
+
# Create display names without the prefix
|
| 9 |
+
DEEPSEEK_MODELS_DISPLAY = [k.replace("deepseek:", "") for k in DEEPSEEK_MODELS_FULL]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Create and launch the interface using get_app utility
|
| 13 |
+
demo = get_app(
|
| 14 |
+
models=DEEPSEEK_MODELS_FULL, # Use the full names with prefix
|
| 15 |
+
default_model=DEEPSEEK_MODELS_FULL[-1],
|
| 16 |
+
dropdown_label="Select DeepSeek Model",
|
| 17 |
+
choices=DEEPSEEK_MODELS_DISPLAY, # Display names without prefix
|
| 18 |
+
fill_height=True,
|
| 19 |
+
coder=True,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
if __name__ == "__main__":
|
| 23 |
+
demo.launch()
|
app_experimental.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import random
|
| 3 |
+
|
| 4 |
+
import google.generativeai as genai
|
| 5 |
+
import gradio as gr
|
| 6 |
+
import openai
|
| 7 |
+
from anthropic import Anthropic
|
| 8 |
+
from openai import OpenAI # Add explicit OpenAI import
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_all_models():
|
| 12 |
+
"""Get all available models from the registries."""
|
| 13 |
+
return [
|
| 14 |
+
"SambaNova: Meta-Llama-3.2-1B-Instruct",
|
| 15 |
+
"SambaNova: Meta-Llama-3.2-3B-Instruct",
|
| 16 |
+
"SambaNova: Llama-3.2-11B-Vision-Instruct",
|
| 17 |
+
"SambaNova: Llama-3.2-90B-Vision-Instruct",
|
| 18 |
+
"SambaNova: Meta-Llama-3.1-8B-Instruct",
|
| 19 |
+
"SambaNova: Meta-Llama-3.1-70B-Instruct",
|
| 20 |
+
"SambaNova: Meta-Llama-3.1-405B-Instruct",
|
| 21 |
+
"Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
|
| 22 |
+
"Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
|
| 23 |
+
"Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
|
| 24 |
+
"Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
|
| 25 |
+
"Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
|
| 26 |
+
"Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
|
| 27 |
+
"Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
|
| 28 |
+
"Hyperbolic: deepseek-ai/DeepSeek-V2.5",
|
| 29 |
+
"Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
|
| 30 |
+
]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
|
| 34 |
+
"""Generate a prompt for models to discuss and build upon previous
|
| 35 |
+
responses.
|
| 36 |
+
"""
|
| 37 |
+
prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
|
| 38 |
+
|
| 39 |
+
Previous responses from other AI models:
|
| 40 |
+
{chr(10).join(f"- {response}" for response in previous_responses)}
|
| 41 |
+
|
| 42 |
+
Please provide your perspective while:
|
| 43 |
+
1. Acknowledging key insights from previous responses
|
| 44 |
+
2. Adding any missing important points
|
| 45 |
+
3. Respectfully noting if you disagree with anything and explaining why
|
| 46 |
+
4. Building towards a complete answer
|
| 47 |
+
|
| 48 |
+
Keep your response focused and concise (max 3-4 paragraphs)."""
|
| 49 |
+
return prompt
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
|
| 53 |
+
"""Generate a prompt for final consensus building."""
|
| 54 |
+
return f"""Review this multi-AI discussion about: "{original_question}"
|
| 55 |
+
|
| 56 |
+
Discussion history:
|
| 57 |
+
{chr(10).join(discussion_history)}
|
| 58 |
+
|
| 59 |
+
As a final synthesizer, please:
|
| 60 |
+
1. Identify the key points where all models agreed
|
| 61 |
+
2. Explain how any disagreements were resolved
|
| 62 |
+
3. Present a clear, unified answer that represents our collective best understanding
|
| 63 |
+
4. Note any remaining uncertainties or caveats
|
| 64 |
+
|
| 65 |
+
Keep the final consensus concise but complete."""
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
|
| 69 |
+
import openai
|
| 70 |
+
|
| 71 |
+
client = openai.OpenAI(api_key=api_key)
|
| 72 |
+
response = client.chat.completions.create(model=model, messages=messages)
|
| 73 |
+
return response.choices[0].message.content
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
|
| 77 |
+
"""Chat with Anthropic's Claude model."""
|
| 78 |
+
client = Anthropic(api_key=api_key)
|
| 79 |
+
response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
|
| 80 |
+
return response.content[0].text
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
|
| 84 |
+
"""Chat with Gemini Pro model."""
|
| 85 |
+
genai.configure(api_key=api_key)
|
| 86 |
+
model = genai.GenerativeModel("gemini-pro")
|
| 87 |
+
|
| 88 |
+
# Convert messages to Gemini format
|
| 89 |
+
gemini_messages = []
|
| 90 |
+
for msg in messages:
|
| 91 |
+
role = "user" if msg["role"] == "user" else "model"
|
| 92 |
+
gemini_messages.append({"role": role, "parts": [msg["content"]]})
|
| 93 |
+
|
| 94 |
+
response = model.generate_content([m["parts"][0] for m in gemini_messages])
|
| 95 |
+
return response.text
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def chat_with_sambanova(
|
| 99 |
+
messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
|
| 100 |
+
) -> str:
|
| 101 |
+
"""Chat with SambaNova's models using their OpenAI-compatible API."""
|
| 102 |
+
client = openai.OpenAI(
|
| 103 |
+
api_key=api_key,
|
| 104 |
+
base_url="https://api.sambanova.ai/v1",
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
response = client.chat.completions.create(
|
| 108 |
+
model=model_name,
|
| 109 |
+
messages=messages,
|
| 110 |
+
temperature=0.1,
|
| 111 |
+
top_p=0.1, # Use the specific model name passed in
|
| 112 |
+
)
|
| 113 |
+
return response.choices[0].message.content
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def chat_with_hyperbolic(
|
| 117 |
+
messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
|
| 118 |
+
) -> str:
|
| 119 |
+
"""Chat with Hyperbolic's models using their OpenAI-compatible API."""
|
| 120 |
+
client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
|
| 121 |
+
|
| 122 |
+
# Add system message to the start of the messages list
|
| 123 |
+
full_messages = [
|
| 124 |
+
{"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
|
| 125 |
+
*messages,
|
| 126 |
+
]
|
| 127 |
+
|
| 128 |
+
response = client.chat.completions.create(
|
| 129 |
+
model=model_name, # Use the specific model name passed in
|
| 130 |
+
messages=full_messages,
|
| 131 |
+
temperature=0.7,
|
| 132 |
+
max_tokens=1024,
|
| 133 |
+
)
|
| 134 |
+
return response.choices[0].message.content
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def multi_model_consensus(
|
| 138 |
+
question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
|
| 139 |
+
) -> list[tuple[str, str]]:
|
| 140 |
+
if not selected_models:
|
| 141 |
+
raise gr.Error("Please select at least one model to chat with.")
|
| 142 |
+
|
| 143 |
+
chat_history = []
|
| 144 |
+
discussion_history = []
|
| 145 |
+
|
| 146 |
+
# Initial responses
|
| 147 |
+
progress(0, desc="Getting initial responses...")
|
| 148 |
+
initial_responses = []
|
| 149 |
+
for i, model in enumerate(selected_models):
|
| 150 |
+
provider, model_name = model.split(": ", 1)
|
| 151 |
+
|
| 152 |
+
try:
|
| 153 |
+
if provider == "Anthropic":
|
| 154 |
+
api_key = os.getenv("ANTHROPIC_API_KEY")
|
| 155 |
+
response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
|
| 156 |
+
elif provider == "SambaNova":
|
| 157 |
+
api_key = os.getenv("SAMBANOVA_API_KEY")
|
| 158 |
+
response = chat_with_sambanova(
|
| 159 |
+
messages=[
|
| 160 |
+
{"role": "system", "content": "You are a helpful assistant"},
|
| 161 |
+
{"role": "user", "content": question},
|
| 162 |
+
],
|
| 163 |
+
api_key=api_key,
|
| 164 |
+
)
|
| 165 |
+
elif provider == "Hyperbolic": # Add Hyperbolic case
|
| 166 |
+
api_key = os.getenv("HYPERBOLIC_API_KEY")
|
| 167 |
+
response = chat_with_hyperbolic(messages=[{"role": "user", "content": question}], api_key=api_key)
|
| 168 |
+
else: # Gemini
|
| 169 |
+
api_key = os.getenv("GEMINI_API_KEY")
|
| 170 |
+
response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
|
| 171 |
+
|
| 172 |
+
initial_responses.append(f"{model}: {response}")
|
| 173 |
+
discussion_history.append(f"Initial response from {model}:\n{response}")
|
| 174 |
+
chat_history.append((f"Initial response from {model}", response))
|
| 175 |
+
except Exception as e:
|
| 176 |
+
chat_history.append((f"Error from {model}", str(e)))
|
| 177 |
+
|
| 178 |
+
# Discussion rounds
|
| 179 |
+
for round_num in range(rounds):
|
| 180 |
+
progress((round_num + 1) / (rounds + 2), desc=f"Discussion round {round_num + 1}...")
|
| 181 |
+
round_responses = []
|
| 182 |
+
|
| 183 |
+
random.shuffle(selected_models) # Randomize order each round
|
| 184 |
+
for model in selected_models:
|
| 185 |
+
provider, model_name = model.split(": ", 1)
|
| 186 |
+
|
| 187 |
+
try:
|
| 188 |
+
discussion_prompt = generate_discussion_prompt(question, discussion_history)
|
| 189 |
+
if provider == "Anthropic":
|
| 190 |
+
api_key = os.getenv("ANTHROPIC_API_KEY")
|
| 191 |
+
response = chat_with_anthropic(
|
| 192 |
+
messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
|
| 193 |
+
)
|
| 194 |
+
elif provider == "SambaNova":
|
| 195 |
+
api_key = os.getenv("SAMBANOVA_API_KEY")
|
| 196 |
+
response = chat_with_sambanova(
|
| 197 |
+
messages=[
|
| 198 |
+
{"role": "system", "content": "You are a helpful assistant"},
|
| 199 |
+
{"role": "user", "content": discussion_prompt},
|
| 200 |
+
],
|
| 201 |
+
api_key=api_key,
|
| 202 |
+
)
|
| 203 |
+
elif provider == "Hyperbolic": # Add Hyperbolic case
|
| 204 |
+
api_key = os.getenv("HYPERBOLIC_API_KEY")
|
| 205 |
+
response = chat_with_hyperbolic(
|
| 206 |
+
messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
|
| 207 |
+
)
|
| 208 |
+
else: # Gemini
|
| 209 |
+
api_key = os.getenv("GEMINI_API_KEY")
|
| 210 |
+
response = chat_with_gemini(
|
| 211 |
+
messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
round_responses.append(f"{model}: {response}")
|
| 215 |
+
discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}")
|
| 216 |
+
chat_history.append((f"Round {round_num + 1} - {model}", response))
|
| 217 |
+
except Exception as e:
|
| 218 |
+
chat_history.append((f"Error from {model} in round {round_num + 1}", str(e)))
|
| 219 |
+
|
| 220 |
+
# Final consensus
|
| 221 |
+
progress(0.9, desc="Building final consensus...")
|
| 222 |
+
model = selected_models[0]
|
| 223 |
+
provider, model_name = model.split(": ", 1)
|
| 224 |
+
|
| 225 |
+
try:
|
| 226 |
+
consensus_prompt = generate_consensus_prompt(question, discussion_history)
|
| 227 |
+
if provider == "Anthropic":
|
| 228 |
+
api_key = os.getenv("ANTHROPIC_API_KEY")
|
| 229 |
+
final_consensus = chat_with_anthropic(
|
| 230 |
+
messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
|
| 231 |
+
)
|
| 232 |
+
elif provider == "SambaNova":
|
| 233 |
+
api_key = os.getenv("SAMBANOVA_API_KEY")
|
| 234 |
+
final_consensus = chat_with_sambanova(
|
| 235 |
+
messages=[
|
| 236 |
+
{"role": "system", "content": "You are a helpful assistant"},
|
| 237 |
+
{"role": "user", "content": consensus_prompt},
|
| 238 |
+
],
|
| 239 |
+
api_key=api_key,
|
| 240 |
+
)
|
| 241 |
+
elif provider == "Hyperbolic": # Add Hyperbolic case
|
| 242 |
+
api_key = os.getenv("HYPERBOLIC_API_KEY")
|
| 243 |
+
final_consensus = chat_with_hyperbolic(
|
| 244 |
+
messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
|
| 245 |
+
)
|
| 246 |
+
else: # Gemini
|
| 247 |
+
api_key = os.getenv("GEMINI_API_KEY")
|
| 248 |
+
final_consensus = chat_with_gemini(
|
| 249 |
+
messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
|
| 250 |
+
)
|
| 251 |
+
except Exception as e:
|
| 252 |
+
final_consensus = f"Error getting consensus from {model}: {e!s}"
|
| 253 |
+
|
| 254 |
+
chat_history.append(("Final Consensus", final_consensus))
|
| 255 |
+
|
| 256 |
+
progress(1.0, desc="Done!")
|
| 257 |
+
return chat_history
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
with gr.Blocks() as demo:
|
| 261 |
+
gr.Markdown("# Experimental Multi-Model Consensus Chat")
|
| 262 |
+
gr.Markdown(
|
| 263 |
+
"""Select multiple models to collaborate on answering your question.
|
| 264 |
+
The models will discuss with each other and attempt to reach a consensus.
|
| 265 |
+
Maximum 3 models can be selected at once."""
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
with gr.Row():
|
| 269 |
+
with gr.Column():
|
| 270 |
+
model_selector = gr.Dropdown(
|
| 271 |
+
choices=get_all_models(),
|
| 272 |
+
multiselect=True,
|
| 273 |
+
label="Select Models (max 3)",
|
| 274 |
+
info="Choose up to 3 models to participate in the discussion",
|
| 275 |
+
value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
|
| 276 |
+
max_choices=3,
|
| 277 |
+
)
|
| 278 |
+
rounds_slider = gr.Slider(
|
| 279 |
+
minimum=1,
|
| 280 |
+
maximum=2,
|
| 281 |
+
value=1,
|
| 282 |
+
step=1,
|
| 283 |
+
label="Discussion Rounds",
|
| 284 |
+
info="Number of rounds of discussion between models",
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
chatbot = gr.Chatbot(height=600, label="Multi-Model Discussion")
|
| 288 |
+
msg = gr.Textbox(label="Your Question", placeholder="Ask a question for the models to discuss...")
|
| 289 |
+
|
| 290 |
+
def respond(message, selected_models, rounds):
|
| 291 |
+
chat_history = multi_model_consensus(message, selected_models, rounds)
|
| 292 |
+
return chat_history
|
| 293 |
+
|
| 294 |
+
msg.submit(respond, [msg, model_selector, rounds_slider], [chatbot], api_name="consensus_chat")
|
| 295 |
+
|
| 296 |
+
for fn in demo.fns.values():
|
| 297 |
+
fn.api_name = False
|
| 298 |
+
|
| 299 |
+
if __name__ == "__main__":
|
| 300 |
+
demo.launch()
|
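The same if/elif provider chain appears three times above (initial answers, each discussion round, and the final consensus). A table-driven sketch of that dispatch, with the chat_with_* helpers stubbed out here because the real ones are defined earlier in app_experimental.py:

import os

def chat_with_sambanova(messages, api_key=None):  # stub for the real helper
    return "stub response"

def chat_with_hyperbolic(messages, api_key=None):  # stub for the real helper
    return "stub response"

def chat_with_gemini(messages, api_key=None):  # stub for the real helper
    return "stub response"

# Map each provider name to its chat helper and API-key env var.
PROVIDERS = {
    "SambaNova": (chat_with_sambanova, "SAMBANOVA_API_KEY"),
    "Hyperbolic": (chat_with_hyperbolic, "HYPERBOLIC_API_KEY"),
    "Gemini": (chat_with_gemini, "GEMINI_API_KEY"),
}

def call_provider(provider: str, prompt: str) -> str:
    # Gemini is the fallback, matching the else branch above.
    chat_fn, env_var = PROVIDERS.get(provider, PROVIDERS["Gemini"])
    return chat_fn(
        messages=[{"role": "user", "content": prompt}],
        api_key=os.getenv(env_var),
    )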
app_fal.py
ADDED
@@ -0,0 +1,16 @@
+import fal_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "fal-ai/ltx-video",
+        "fal-ai/ltx-video/image-to-video",
+        "fal-ai/luma-photon",
+    ],
+    default_model="fal-ai/luma-photon",
+    src=fal_gradio.registry,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_fireworks.py
ADDED
@@ -0,0 +1,19 @@
+import os
+
+import fireworks_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "f1-preview",
+        "f1-mini-preview",
+        "llama-v3p3-70b-instruct",
+    ],
+    default_model="llama-v3p3-70b-instruct",
+    src=fireworks_gradio.registry,
+    accept_token=not os.getenv("FIREWORKS_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
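accept_token=not os.getenv("FIREWORKS_API_KEY") presumably toggles a token input in the UI: with the secret configured on the Space nothing is asked, otherwise the visitor supplies their own key. The same pattern recurs in app_openai_voice.py and app_perplexity.py below.

import os

# Show a token field only as a fallback, when no server-side secret exists.
accept_token = not os.getenv("FIREWORKS_API_KEY")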
app_gemini.py
ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[-1],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
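This filter-then-strip-prefix pattern repeats for nearly every ai_gradio provider below (Groq, Hyperbolic, Minimax, Mistral, Nvidia, OpenAI, OpenRouter, Qwen). A small helper, not part of this PR, that captures it once; it assumes only that registry keys are "provider:model" strings:

import ai_gradio

def provider_models(prefix: str) -> tuple[list[str], list[str]]:
    """Return (full_names, display_names) for one provider prefix, e.g. "gemini:"."""
    full = [k for k in ai_gradio.registry if k.startswith(prefix)]
    return full, [k.removeprefix(prefix) for k in full]

GEMINI_MODELS_FULL, GEMINI_MODELS_DISPLAY = provider_models("gemini:")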
app_gemini_camera.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[-2],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    camera=True,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_gemini_coder.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[0],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_gemini_voice.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[-2],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    enable_voice=True,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_groq.py
ADDED
@@ -0,0 +1,21 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Groq models from the registry
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
+
+# Create display names without the prefix
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
+
+demo = get_app(
+    models=GROQ_MODELS_FULL,
+    default_model=GROQ_MODELS_FULL[-2],
+    src=ai_gradio.registry,
+    dropdown_label="Select Groq Model",
+    choices=GROQ_MODELS_DISPLAY,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_groq_coder.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Groq models but keep their full names for loading
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
+
+# Create display names without the prefix
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GROQ_MODELS_FULL,  # Use the full names with prefix
+    default_model=GROQ_MODELS_FULL[-1],
+    dropdown_label="Select Groq Model",
+    choices=GROQ_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+
+if __name__ == "__main__":
+    demo.launch()
app_hf.py
ADDED
@@ -0,0 +1,17 @@
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "microsoft/Phi-3.5-mini-instruct",
+        "HuggingFaceTB/SmolLM2-1.7B-Instruct",
+        "google/gemma-2-2b-it",
+        "openai-community/gpt2",
+        "microsoft/phi-2",
+        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+    ],
+    default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
+    src="models",
+)
+
+if __name__ == "__main__":
+    demo.launch()
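With src="models" the entries are plain Hub model ids, so each one presumably resolves to the equivalent of a direct gr.load call, the same call app_meta.py below makes explicitly. A hypothetical one-model equivalent (model id taken from the list above):

import gradio as gr

# Load one Hub model as a ready-made chat demo.
demo = gr.load("models/HuggingFaceTB/SmolLM2-1.7B-Instruct")

if __name__ == "__main__":
    demo.launch()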
app_huggingface.py
ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Huggingface models but keep their full names for loading
+HUGGINGFACE_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("huggingface:")]
+
+# Create display names without the prefix
+HUGGINGFACE_MODELS_DISPLAY = [k.replace("huggingface:", "") for k in HUGGINGFACE_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HUGGINGFACE_MODELS_FULL,  # Use the full names with prefix
+    default_model=HUGGINGFACE_MODELS_FULL[0],
+    dropdown_label="Select Huggingface Model",
+    choices=HUGGINGFACE_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+    provider="fireworks-ai",
+    bill_to="huggingface",
+)
app_hyperbolic.py
ADDED
@@ -0,0 +1,19 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the hyperbolic models but keep their full names for loading
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
+
+# Create display names without the prefix
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
+    default_model=HYPERBOLIC_MODELS_FULL[-2],
+    dropdown_label="Select Hyperbolic Model",
+    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
app_hyperbolic_coder.py
ADDED
@@ -0,0 +1,20 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the hyperbolic models but keep their full names for loading
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
+
+# Create display names without the prefix
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
+    default_model=HYPERBOLIC_MODELS_FULL[-2],
+    dropdown_label="Select Hyperbolic Model",
+    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
app_langchain.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Langchain models but keep their full names for loading
+LANGCHAIN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("langchain:")]
+
+# Create display names without the prefix
+LANGCHAIN_MODELS_DISPLAY = [k.replace("langchain:", "") for k in LANGCHAIN_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=LANGCHAIN_MODELS_FULL,  # Use the full names with prefix
+    default_model=LANGCHAIN_MODELS_FULL[0],
+    dropdown_label="Select Langchain Model",
+    choices=LANGCHAIN_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
+
app_lumaai.py
ADDED
@@ -0,0 +1,7 @@
+import gradio as gr
+import lumaai_gradio
+
+demo = gr.load(
+    name="dream-machine",
+    src=lumaai_gradio.registry,
+)
app_marco_o1.py
ADDED
@@ -0,0 +1,12 @@
+import gradio as gr
+import spaces
+import transformers_gradio
+
+demo = gr.load(name="AIDC-AI/Marco-o1", src=transformers_gradio.registry)
+demo.fn = spaces.GPU()(demo.fn)
+
+for fn in demo.fns.values():
+    fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
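demo.fn = spaces.GPU()(demo.fn) retrofits the already-loaded inference function with the ZeroGPU decorator so each call is scheduled onto GPU hardware; when you own the function, the decorator form is the usual spelling. A minimal sketch:

import spaces

@spaces.GPU  # allocate ZeroGPU hardware for the duration of each call
def generate(prompt: str) -> str:
    # model inference would run here; this stub just echoes
    return prompt.upper()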
app_meta.py
ADDED
@@ -0,0 +1,6 @@
+import gradio as gr
+
+demo = gr.load("models/meta-llama/Llama-3.3-70B-Instruct")
+
+if __name__ == "__main__":
+    demo.launch()
app_mindsearch.py
ADDED
@@ -0,0 +1,12 @@
+import gradio as gr
+
+# Load the Gradio space
+demo = gr.load(name="internlm/MindSearch", src="spaces")
+
+# Disable API access for all functions
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
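Loading a Space and then switching off every endpoint's api_name recurs below in app_moondream.py and app_omini.py. A helper, not in the PR, that folds the pattern into one call:

import gradio as gr

def load_space_without_api(space_id: str):
    """Mirror a Space but hide its endpoints from the API page."""
    demo = gr.load(name=space_id, src="spaces")
    if hasattr(demo, "fns"):
        for fn in demo.fns.values():
            fn.api_name = False
    return demo

demo = load_space_without_api("internlm/MindSearch")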
app_minimax.py
ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Minimax models but keep their full names for loading
+MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
+
+# Create display names without the prefix
+MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
+    default_model=MINIMAX_MODELS_FULL[0],
+    dropdown_label="Select Minimax Model",
+    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_minimax_coder.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Minimax models but keep their full names for loading
+MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
+
+# Create display names without the prefix
+MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
+    default_model=MINIMAX_MODELS_FULL[0],
+    dropdown_label="Select Minimax Model",
+    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_mistral.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the mistral models but keep their full names for loading
+MISTRAL_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("mistral:")]
+
+# Create display names without the prefix
+MISTRAL_MODELS_DISPLAY = [k.replace("mistral:", "") for k in MISTRAL_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MISTRAL_MODELS_FULL,  # Use the full names with prefix
+    default_model=MISTRAL_MODELS_FULL[5],
+    dropdown_label="Select Mistral Model",
+    choices=MISTRAL_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_moondream.py
ADDED
@@ -0,0 +1,13 @@
+import gradio as gr
+
+# Load the Gradio space
+demo = gr.load(name="akhaliq/moondream", src="spaces")
+
+
+# Disable API access for all functions
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
app_nvidia.py
ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the nvidia models but keep their full names for loading
+NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
+
+# Create display names without the prefix
+NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
+    default_model=NVIDIA_MODELS_FULL[0],
+    dropdown_label="Select Nvidia Model",
+    choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_nvidia_coder.py
ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the nvidia models but keep their full names for loading
+NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
+
+# Create display names without the prefix
+NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
+    default_model=NVIDIA_MODELS_FULL[-1],
+    dropdown_label="Select Nvidia Model",
+    choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_omini.py
ADDED
@@ -0,0 +1,10 @@
+import gradio as gr
+
+# Load the Gradio space
+demo = gr.load(name="Yuanshi/OminiControl", src="spaces")
+
+
+# Disable API access for all functions
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
app_openai.py
ADDED
@@ -0,0 +1,21 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the OpenAI models but keep their full names for loading
+OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
+
+# Create display names without the prefix
+OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=OPENAI_MODELS_FULL,  # Use the full names with prefix
+    default_model=OPENAI_MODELS_FULL[-1],
+    dropdown_label="Select OpenAI Model",
+    choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_openai_coder.py
ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the OpenAI models but keep their full names for loading
+OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
+
+# Create display names without the prefix
+OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=OPENAI_MODELS_FULL,  # Use the full names with prefix
+    default_model=OPENAI_MODELS_FULL[-1],
+    dropdown_label="Select OpenAI Model",
+    choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_openai_voice.py
ADDED
@@ -0,0 +1,23 @@
+import os
+
+import openai_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "gpt-4o-realtime-preview",
+        "gpt-4o-realtime-preview-2024-12-17",
+        "gpt-4o-realtime-preview-2024-10-01",
+        "gpt-4o-mini-realtime-preview",
+        "gpt-4o-mini-realtime-preview-2024-12-17",
+    ],
+    default_model="gpt-4o-mini-realtime-preview-2024-12-17",
+    src=openai_gradio.registry,
+    accept_token=not os.getenv("OPENAI_API_KEY"),
+    twilio_sid=os.getenv("TWILIO_SID_OPENAI"),
+    twilio_token=os.getenv("TWILIO_AUTH_OPENAI"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_openrouter.py
ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the OpenRouter models but keep their full names for loading
+OPENROUTER_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openrouter:")]
+
+# Create display names without the prefix
+OPENROUTER_MODELS_DISPLAY = [k.replace("openrouter:", "") for k in OPENROUTER_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=OPENROUTER_MODELS_FULL,  # Use the full names with prefix
+    default_model=OPENROUTER_MODELS_FULL[-1],
+    dropdown_label="Select OpenRouter Model",
+    choices=OPENROUTER_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_paligemma.py
ADDED
@@ -0,0 +1,78 @@
+import gradio as gr
+from gradio_client import Client, handle_file
+
+MODELS = {"Paligemma-10B": "akhaliq/paligemma2-10b-ft-docci-448"}
+
+
+def create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
+    def chat(message, history):
+        text = message.get("text", "")
+        files = message.get("files", [])
+        processed_files = [handle_file(f) for f in files]
+
+        response = client.predict(
+            message={"text": text, "files": processed_files},
+            system_prompt=system_prompt,
+            temperature=temperature,
+            max_new_tokens=max_tokens,
+            top_k=top_k,
+            repetition_penalty=rep_penalty,
+            top_p=top_p,
+            api_name="/chat",
+        )
+        return response
+
+    return chat
+
+
+def set_client_for_session(model_name, request: gr.Request):
+    headers = {}
+    if request and hasattr(request, "headers"):
+        x_ip_token = request.headers.get("x-ip-token")
+        if x_ip_token:
+            headers["X-IP-Token"] = x_ip_token
+
+    return Client(MODELS[model_name], headers=headers)
+
+
+def safe_chat_fn(message, history, client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
+    if client is None:
+        return "Error: Client not initialized. Please refresh the page."
+    try:
+        return create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p)(
+            message, history
+        )
+    except Exception as e:
+        print(f"Error during chat: {e!s}")
+        return f"Error during chat: {e!s}"
+
+
+with gr.Blocks() as demo:
+    client = gr.State()
+
+    with gr.Accordion("Advanced Settings", open=False):
+        system_prompt = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt")
+        with gr.Row():
+            temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature")
+            top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top P")
+        with gr.Row():
+            top_k = gr.Slider(minimum=1, maximum=100, value=40, step=1, label="Top K")
+            rep_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, label="Repetition Penalty")
+        max_tokens = gr.Slider(minimum=64, maximum=4096, value=1024, step=64, label="Max Tokens")
+
+    chat_interface = gr.ChatInterface(
+        fn=safe_chat_fn,
+        additional_inputs=[client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p],
+        multimodal=True,
+    )
+
+    # Initialize client on page load with default model
+    demo.load(fn=set_client_for_session, inputs=[gr.State("Paligemma-10B")], outputs=[client])  # Using default model
+
+    # Move the API access check here, after demo is defined
+    if hasattr(demo, "fns"):
+        for fn in demo.fns.values():
+            fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
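create_chat_fn above is a thin wrapper over gradio_client; a standalone sketch of the same call, assuming the Space is reachable and exposes the /chat signature used above (the image path is a placeholder):

from gradio_client import Client, handle_file

client = Client("akhaliq/paligemma2-10b-ft-docci-448")
reply = client.predict(
    message={"text": "Describe this image.", "files": [handle_file("example.jpg")]},
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(reply)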
app_perplexity.py
ADDED
@@ -0,0 +1,23 @@
+import os
+
+import perplexity_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "llama-3.1-sonar-large-128k-online",
+        "llama-3.1-sonar-small-128k-online",
+        "llama-3.1-sonar-huge-128k-online",
+        "llama-3.1-sonar-small-128k-chat",
+        "llama-3.1-sonar-large-128k-chat",
+        "llama-3.1-8b-instruct",
+        "llama-3.1-70b-instruct",
+    ],
+    default_model="llama-3.1-sonar-huge-128k-online",
+    src=perplexity_gradio.registry,
+    accept_token=not os.getenv("PERPLEXITY_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_playai.py
ADDED
@@ -0,0 +1,10 @@
+import gradio as gr
+import playai_gradio
+
+demo = gr.load(
+    name="PlayDialog",
+    src=playai_gradio.registry,
+)
+
+for fn in demo.fns.values():
+    fn.api_name = False
app_qwen.py
ADDED
@@ -0,0 +1,19 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the qwen models but keep their full names for loading
+QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]
+
+# Create display names without the prefix
+QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=QWEN_MODELS_FULL,  # Use the full names with prefix
+    default_model=QWEN_MODELS_FULL[-1],
+    dropdown_label="Select Qwen Model",
+    choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)