Upload 4 files
- README (1).md +114 -0
- app.py +932 -0
- mcp_server.py +639 -0
- requirements.txt +18 -0
README (1).md
ADDED
@@ -0,0 +1,114 @@
---
title: AI Marketing Content Generator
emoji: 🎨
colorFrom: blue
colorTo: purple
sdk: gradio
sdk_version: 5.33.0
app_file: app.py
pinned: false
license: mit
---

# 🎨 AI Marketing Content Generator

Generate professional marketing images with AI using FLUX.1-schnell. Perfect for content creators, marketers, and social media managers!

## ✨ Features

### 🖼️ Single Image Generation
- Create individual marketing assets
- Apply professional style presets
- Optimized prompts for marketing content

### 🔄 A/B Testing Batch
- Generate multiple variations of the same concept
- Compare different approaches
- Perfect for testing engagement

### 📱 Social Media Pack
- Generate platform-optimized images
- Support for Instagram, Twitter, LinkedIn, Facebook, and YouTube
- Correct aspect ratios for each platform

### 📝 Prompt Templates
- Pre-built templates for common use cases
- Product photography, social announcements, blog headers
- Easy variable substitution

## 🚀 How to Use

1. **Choose Your Task**: Select the appropriate tab
2. **Enter Your Prompt**: Be specific about what you want
3. **Adjust Settings**: Pick style and quality options
4. **Generate**: Click the button and wait for results

## 💡 Pro Tips

- **Be Specific**: "Red sports car on mountain road at sunset" > "car"
- **Use Style Presets**: Apply consistent branding with the style options
- **FLUX.1-schnell Is Fast**: It works best with 1-4 inference steps
- **Test Variations**: Use A/B testing to find what works best

## 🎯 Example Prompts

### Product Photography
```
Professional product photography of wireless headphones, white background, studio lighting, commercial style
```

### Social Media Post
```
Vibrant Instagram post announcing a summer sale, bold colors, modern design, eye-catching
```

### Blog Header
```
Minimalist blog header about productivity tips, clean design, professional, engaging
```

## 🛠️ Technical Details

- **Model**: FLUX.1-schnell (Black Forest Labs)
- **Backend**: Hugging Face Diffusers + Inference API
- **Frontend**: Gradio
- **Inference Steps**: 1-4 (optimized for speed)

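For reference, the backend described above can also be exercised directly from Python. The snippet below is a minimal, illustrative sketch rather than the app's actual code path; it assumes the `huggingface_hub` client, the `black-forest-labs/FLUX.1-schnell` model ID, and an `HF_TOKEN` environment variable:

```python
import os
from huggingface_hub import InferenceClient

# Assumes HF_TOKEN is set and the model is reachable via the Inference API.
client = InferenceClient(
    model="black-forest-labs/FLUX.1-schnell",
    token=os.environ["HF_TOKEN"],
)

# FLUX.1-schnell is tuned for very few steps, so 4 is usually enough.
image = client.text_to_image(
    "Professional product photography of wireless headphones, white background, studio lighting",
    num_inference_steps=4,
)
image.save("headphones.png")  # text_to_image returns a PIL.Image
```

Exact results will vary; the Space's tabs layer style presets and platform sizing on top of this basic call.
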
## 📊 Supported Platforms & Sizes

| Platform | Resolution |
|----------|------------|
| Instagram Post | 1024×1024 |
| Instagram Story | 1024×1820 |
| Twitter Post | 1200×675 |
| LinkedIn Post | 1200×1200 |
| Facebook Cover | 1200×630 |
| YouTube Thumbnail | 1280×720 |

## ⚡ Performance

- **Generation Time**: 10-30 seconds per image
- **Model**: Optimized for speed and quality
- **Hardware**: Runs on Hugging Face Spaces infrastructure

## 🔧 Setup for Your Own Space

1. **Fork this Space** or create a new one
2. **Set Environment Variables**:
   - `HF_TOKEN`: Your Hugging Face token (for API access)
3. **Choose Hardware**: CPU (free) or GPU (faster)
4. **Deploy**: Push your changes

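If generation fails right after deployment, the most common cause is a missing `HF_TOKEN` from step 2 above. A quick, hypothetical startup check (not part of the shipped code) looks like this:

```python
import os

# Fail fast with a clear message instead of surfacing opaque API errors later.
if not os.environ.get("HF_TOKEN"):
    raise RuntimeError(
        "HF_TOKEN is not set. Add it under the Space's Settings -> Variables and secrets."
    )
```
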
## 📝 License

MIT License - feel free to use and modify this project for your own work!

## 🤝 Contributing

Issues and pull requests are welcome! Help make this tool even better for the marketing community.

---

**Made with ❤️ for content creators and marketers**

*Powered by FLUX.1-schnell, Hugging Face, and Gradio*
app.py
ADDED
@@ -0,0 +1,932 @@
import gradio as gr
import base64
import asyncio
import json
import os
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
from typing import List
import nest_asyncio
import threading
import queue
import time
from PIL import Image
from io import BytesIO

os.makedirs("ContentCreator/created_image", exist_ok=True)

nest_asyncio.apply()


class MCP_Modal_Marketing_Tool:
    def __init__(self):
        self.session: ClientSession = None
        self.available_tools: List[dict] = []
        self.is_connected = False
        self.request_queue = queue.Queue()
        self.result_queue = queue.Queue()

    async def call_mcp_tool(self, tool_name: str, arguments: dict):
        """Generic method to call any MCP tool"""
        try:
            result = await self.session.call_tool(tool_name, arguments=arguments)
            if hasattr(result, 'content') and result.content:
                return result.content[0].text
            return None
        except Exception as e:
            print(f"Error calling tool {tool_name}: {str(e)}")
            raise e

    async def process_queue(self):
        """Process requests from the queue"""
        while True:
            try:
                if not self.request_queue.empty():
                    item = self.request_queue.get()
                    if item == "STOP":
                        break

                    tool_name, arguments, request_id = item
                    try:
                        result = await self.call_mcp_tool(tool_name, arguments)
                        self.result_queue.put(("success", result, request_id))
                    except Exception as e:
                        self.result_queue.put(("error", str(e), request_id))
                else:
                    await asyncio.sleep(0.1)
            except Exception as e:
                print(f"Error in process_queue: {str(e)}")

    async def connect_to_server_and_run(self):
        """Connect to MCP server and start processing"""
        server_params = StdioServerParameters(
            command="python",
            args=["mcp_server.py"],
            env=None,
        )

        async with stdio_client(server_params) as (read, write):
            async with ClientSession(read, write) as session:
                self.session = session
                await session.initialize()

                response = await session.list_tools()
                tools = response.tools
                print("Connected to MCP server with tools:",
                      [tool.name for tool in tools])

                self.available_tools = [{
                    "name": tool.name,
                    "description": tool.description,
                    "input_schema": tool.inputSchema
                } for tool in tools]

                self.is_connected = True
                print("Marketing Tool MCP Server connected!")

                # Check Modal health
                health_result = await self.call_mcp_tool("health_check", {})
                print(f"Modal API Status: {health_result}")

                await self.process_queue()


# Global instance
marketing_tool = MCP_Modal_Marketing_Tool()


def wait_for_result(request_id, timeout=300):
    """Wait for a result with a specific request ID"""
    start_time = time.time()
    while time.time() - start_time < timeout:
        if not marketing_tool.result_queue.empty():
            status, result, result_id = marketing_tool.result_queue.get()
            if result_id == request_id:
                return status, result
            else:
                # Put it back if it's not our result
                marketing_tool.result_queue.put((status, result, result_id))
        time.sleep(0.1)
    return "error", "Timeout"


def decode_and_save_image(image_b64, filename):
    """Decode base64 and save image"""
    import base64
    from PIL import Image
    from io import BytesIO

    # Ensure the path is inside created_image/
    full_path = os.path.join("ContentCreator/created_image", filename)
    image_b64 = image_b64.strip()
    missing_padding = len(image_b64) % 4
    if missing_padding:
        image_b64 += '=' * (4 - missing_padding)

    image_data = base64.b64decode(image_b64)
    image = Image.open(BytesIO(image_data))
    image.save(full_path)
    return full_path


def single_image_generation(prompt, num_steps, style):
    """Generate a single image with optional style"""
    if not marketing_tool.is_connected:
        return None, "⚠️ MCP Server not connected. Please wait a few seconds and try again."

    try:
        request_id = f"single_{time.time()}"

        # Apply style if selected
        if style != "none":
            style_request_id = f"style_{time.time()}"
            marketing_tool.request_queue.put((
                "add_style_modifier",
                {"prompt": prompt, "style": style},
                style_request_id
            ))

            status, result = wait_for_result(style_request_id, timeout=50)
            if status == "success":
                style_data = json.loads(result)
                prompt = style_data["enhanced_prompt"]

        # Generate image
        marketing_tool.request_queue.put((
            "generate_and_save_image",
            {"prompt": prompt, "num_inference_steps": num_steps},
            request_id
        ))

        status, result = wait_for_result(request_id)

        if status == "success":
            filename = decode_and_save_image(
                result, f"generated_{int(time.time())}.png")
            return filename, f"✅ Image generated successfully!\n📝 Final prompt: {prompt}"
        else:
            return None, f"❌ Error: {result}"

    except Exception as e:
        return None, f"❌ Error: {str(e)}"


# Update the batch generation function in app.py
def enhanced_batch_generation(prompt, variation_type, count, num_steps):
    """Generate strategic variations for A/B testing"""
    if not marketing_tool.is_connected:
        return None, "⚠️ MCP Server not connected. Please wait a few seconds and try again."

    try:
        request_id = f"smart_batch_{time.time()}"
        marketing_tool.request_queue.put((
            "batch_generate_smart_variations",
            {
                "prompt": prompt,
                "count": count,
                "variation_type": variation_type,
                "num_inference_steps": num_steps
            },
            request_id
        ))

        status, result = wait_for_result(request_id, timeout=300)  # Longer timeout for batch

        if status == "success":
            batch_data = json.loads(result)
            images = []
            variation_details = []

            for i, img_data in enumerate(batch_data["images"]):
                filename = decode_and_save_image(
                    img_data["image_base64"],
                    f"variation_{i+1}_{int(time.time())}.png"
                )
                images.append(filename)

                variation_details.append(
                    f"**Variation {i+1}:** {img_data['variation_description']}\n"
                    f"*Testing Purpose:* {img_data['testing_purpose']}\n"
                )

            strategy_explanation = batch_data.get("testing_strategy", "")

            status_message = (
                f"✅ Generated {len(images)} strategic variations!\n\n"
                f"**Testing Strategy:** {strategy_explanation}\n\n"
                f"**Variations Created:**\n" +
                "\n".join(variation_details) +
                f"\n💡 **Next Steps:** Post each variation and track engagement metrics to see which performs best!"
            )

            return images, status_message
        else:
            return None, f"❌ Error: {result}"

    except Exception as e:
        return None, f"❌ Error: {str(e)}"


# Update the strategy info based on selection
def update_strategy_info(variation_type):
    strategy_descriptions = {
        "mixed": {
            "title": "Mixed Strategy Testing",
            "description": "Tests multiple variables (colors, layout, mood) to find overall best approach",
            "use_case": "Best for comprehensive optimization when you're not sure what to test first"
        },
        "color_schemes": {
            "title": "Color Psychology Testing",
            "description": "Tests how different color schemes affect emotional response and engagement",
            "use_case": "Great for brand content, product launches, and emotional marketing"
        },
        "composition_styles": {
            "title": "Layout & Composition Testing",
            "description": "Tests different visual arrangements and focal points",
            "use_case": "Perfect for optimizing visual hierarchy and user attention flow"
        },
        "emotional_tones": {
            "title": "Emotional Tone Testing",
            "description": "Tests different moods and feelings to see what resonates with your audience",
            "use_case": "Ideal for brand personality and audience connection optimization"
        },
        "social_media": {
            "title": "Platform Optimization Testing",
            "description": "Tests platform-specific elements and styles",
            "use_case": "Essential for multi-platform content strategies"
        },
        "engagement_hooks": {
            "title": "Attention-Grabbing Testing",
            "description": "Tests different ways to capture and hold viewer attention",
            "use_case": "Critical for improving reach and stopping scroll behavior"
        },
        "brand_positioning": {
            "title": "Brand Positioning Testing",
            "description": "Tests how different brand personalities affect audience perception",
            "use_case": "Important for brand development and target audience alignment"
        }
    }

    info = strategy_descriptions.get(variation_type, strategy_descriptions["mixed"])
    return f"""
**💡 Current Strategy:** {info['title']}

**What this tests:** {info['description']}

**Best for:** {info['use_case']}
"""


def social_media_generation(prompt, platforms, num_steps):
    """Generate images for multiple social media platforms with correct resolutions"""
    if not marketing_tool.is_connected:
        return None, "MCP Server not connected"

    try:
        request_id = f"social_{time.time()}"
        marketing_tool.request_queue.put((
            "generate_social_media_set",
            {"prompt": prompt, "platforms": platforms, "num_inference_steps": num_steps},
            request_id
        ))

        status, result = wait_for_result(request_id)

        if status == "success":
            social_data = json.loads(result)
            results = []

            for platform_data in social_data["results"]:
                filename = decode_and_save_image(
                    platform_data["image_base64"],
                    f"{platform_data['platform']}_{platform_data['resolution']}_{int(time.time())}.png"
                )
                results.append((platform_data["platform"], filename, platform_data["resolution"]))

            # Create a status message with resolutions
            if results:
                status_msg = "Generated images:\n" + "\n".join([
                    f"• {r[0]}: {r[2]}" for r in results
                ])
                return [r[1] for r in results], status_msg
            else:
                return None, "No images generated"
        else:
            return None, f"Error: {result}"

    except Exception as e:
        return None, f"Error: {str(e)}"


def start_mcp_server():
    """Start MCP server in background"""
    def run_server():
        asyncio.run(marketing_tool.connect_to_server_and_run())

    thread = threading.Thread(target=run_server, daemon=True)
    thread.start()
    return thread


# Platform size presets for reference
SIZE_PRESETS = {
    "instagram_post": (1080, 1080),
    "instagram_story": (1080, 1920),
    "twitter_post": (1200, 675),
    "linkedin_post": (1200, 1200),
    "facebook_cover": (1200, 630),
    "youtube_thumbnail": (1280, 720)
}

# Create Gradio interface with tabs
with gr.Blocks(title="AI Marketing Content Generator") as demo:
    gr.Markdown("""
    # 🎨 AI Marketing Content Generator
    ### Powered by Flux AI on Modal GPU via MCP

    Generate professional marketing images with AI - optimized for content creators and marketers!

    ⏰ **Please wait 5-10 seconds after launching for the MCP server to connect**
    """)

    # Connection status
    connection_status = gr.Markdown("🔄 Connecting to MCP server...")

    with gr.Tabs():
        # Instructions Tab (First)
        with gr.TabItem("📖 Quick Start"):
            gr.Markdown("""
            ## 🚀 Get Started in 3 Steps

            1. **Wait for Connection** ✅ (Status shown above)
            2. **Pick a Tab** → Choose what you need to create
            3. **Generate** → Enter details and click generate

            ### 🎯 What Each Tab Does:
            - **🖼️ Single Image**: Create one professional marketing image
            - **🔄 A/B Testing**: Generate variations to test what works best
            - **📱 Social Media**: Create multiple platform-sized images at once
            - **🤖 AI Assistant**: Let AI write the perfect prompt for you
            """)

            with gr.Accordion("📚 Detailed Guide & Tips", open=False):
                gr.Markdown("""
                ## Welcome to AI Marketing Content Generator!

                This tool helps you create professional marketing images using AI. Here's how to use each feature:

                ### 🚀 Quick Start Guide

                1. **Wait for Connection**: The status above should show "✅ Connected" before you start
                2. **Choose Your Task**: Select a tab based on what you need
                3. **Enter Details**: Fill in the prompts and settings
                4. **Generate**: Click the generate button and wait for results

                ### 📑 Feature Overview

                #### 1️⃣ Single Image Tab
                - **Best for**: Individual marketing assets, blog headers, social posts
                - **How to use**:
                  1. Enter a descriptive prompt
                  2. Choose a style preset (optional)
                  3. Adjust quality with steps (higher = better but slower)
                  4. Click Generate

                #### 2️⃣ A/B Testing Batch Tab
                - **Best for**: Creating variations to test engagement
                - **How to use**:
                  1. Enter your base prompt
                  2. Select how many variations (2-5)
                  3. Generate and compare results

                #### 3️⃣ Social Media Pack Tab
                - **Best for**: Multi-platform campaigns
                - **How to use**:
                  1. Enter your content prompt
                  2. Check the platforms you need
                  3. Generate all sizes at once

                #### 4️⃣ Prompt Templates Tab
                - **Best for**: Quick professional prompts
                - **How to use**:
                  1. Select a template type
                  2. Fill in the required variables
                  3. Apply template
                  4. Use the generated prompt

                ### 💡 Pro Tips

                - **Be Specific**: "Product photo of red shoes on white background" > "shoes"
                - **Use Styles**: Apply style presets for consistent branding
                - **Higher Steps**: Use 70-100 steps for final production images
                - **Test First**: Use lower steps (30-40) for quick tests
                - **Save Prompts**: Keep successful prompts for future use

                ### 🎯 Common Use Cases

                1. **E-commerce Product Photos**
                   ```
                   Prompt: "Professional product photography of [product], white background, studio lighting"
                   Style: professional
                   Steps: 70
                   ```

                2. **Social Media Announcements**
                   ```
                   Prompt: "Eye-catching announcement graphic for [event/sale], bold colors"
                   Style: playful
                   Steps: 50
                   ```

                3. **Blog Headers**
                   ```
                   Prompt: "Minimalist header image about [topic], modern design"
                   Style: minimalist
                   Steps: 50
                   ```

                ### ⚠️ Troubleshooting

                - **"Not Connected" Error**: Wait 10 seconds and refresh the page
                - **Timeout Errors**: The GPU might be cold starting, try again
                - **Poor Quality**: Increase steps to 70+ for better results
                - **Wrong Style**: Make sure to select style before generating
                """)

        # Single Image Generation Tab
        with gr.TabItem("🖼️ Single Image"):
            with gr.Row():
                with gr.Column():
                    single_prompt = gr.Textbox(
                        label="Prompt",
                        placeholder="Describe your image in detail...\nExample: Professional headshot of business person in modern office",
                        lines=3
                    )
                    with gr.Row():
                        single_style = gr.Dropdown(
                            choices=["none", "professional", "playful",
                                     "minimalist", "luxury", "tech"],
                            value="none",
                            label="Style Preset",
                            info="Apply a consistent style to your image"
                        )
                        single_steps = gr.Slider(
                            10, 100, 50,
                            step=10,
                            label="Quality (Inference Steps)",
                            info="Higher = better quality but slower"
                        )
                    single_btn = gr.Button(
                        "🎨 Generate Image", variant="primary", size="lg")

                    # Quick examples
                    with gr.Accordion("💭 Example Ideas", open=False):
                        gr.Examples(
                            examples=[
                                ["""This poster is dominated by blue-purple neon lights, with the background of a hyper city at night, with towering skyscrapers surrounded by colorful LED light strips. In the center of the picture is a young steampunk modern robot with virtual information interfaces and digital codes floating around him. The future fonted title "CYNAPTICS" is in neon blue, glowing, as if outlined by laser, exuding a sense of technology and a cold and mysterious atmosphere. The small words "FUTURE IS NOW" seem to be calling the audience to the future, full of science fiction and trendy charm""", "professional", 50],
                                ["poster of,a white girl,A young korean woman pose with a white Vespa scooter on a sunny day,dressed in a stylish red and white jacket .inside a jacket is strapless,with a casual denim skirt. She wears a helmet with vintage-style goggles,and converse sneakers,adding a retro touch to her outfit. The bright sunlight highlights her relaxed and cheerful expression,and the Vespa's white color pops against the clear blue sky. The background features a vibrant,sunlit scene with a few trees or distant buildings,creating a fresh and joyful atmosphere. Art style: realistic,high detail,vibrant colors,warm and cheerful.,f1.4 50mm,commercial photo style,with text around is 'Chasing the sun on my Vespa nothing but the open road ahead'", "playful", 40],
                                ["""Badminton is not just about winning, it’s about daring to challenge the limits of speed and precision. It’s a game where every strike is a test of reflexes, every point a moment of courage. To play badminton is to engage in a battle of endurance, strategy, and passion.""", "minimalist", 50],
                            ],
                            inputs=[single_prompt, single_style, single_steps],
                            label="Quick Examples"
                        )

                with gr.Column():
                    single_output = gr.Image(
                        label="Generated Image", type="filepath")
                    single_status = gr.Textbox(
                        label="Status", lines=3, interactive=False)

        # Batch Generation Tab
        with gr.TabItem("🔄 A/B Testing Batch"):
            gr.Markdown("""
            ### Generate Strategic Variations for Testing
            Create different versions that test specific elements to optimize your content performance.
            Each variation tests a different hypothesis about what works best for your audience.
            """)
            with gr.Row():
                with gr.Column():
                    batch_prompt = gr.Textbox(
                        label="Base Content Prompt",
                        placeholder="Describe your core content idea...\nExample: Professional announcement for new product launch",
                        lines=3
                    )
                    batch_variation_type = gr.Dropdown(
                        choices=[
                            ("🎨 Mixed Strategy (Recommended)", "mixed"),
                            ("🌈 Color Psychology Test", "color_schemes"),
                            ("📐 Layout & Composition Test", "composition_styles"),
                            ("😊 Emotional Tone Test", "emotional_tones"),
                            ("📱 Platform Optimization Test", "social_media"),
                            ("👁️ Attention-Grabbing Test", "engagement_hooks"),
                            ("🏷️ Brand Positioning Test", "brand_positioning")
                        ],
                        value="mixed",
                        label="Testing Strategy",
                        info="Choose what aspect you want to test"
                    )
                    with gr.Row():
                        batch_count = gr.Slider(
                            2, 5, 3,
                            step=1,
                            label="Number of Variations",
                            info="How many different versions to generate"
                        )
                        batch_steps = gr.Slider(
                            10, 100, 40,
                            label="Quality (Inference Steps)", info="Lower steps for quick testing")

                    batch_btn = gr.Button(
                        "🔄 Generate Variations", variant="primary", size="lg")

                    strategy_info = gr.Markdown("""
                    **💡 Current Strategy:** Mixed approach testing multiple variables
                    **What this tests:** Different colors, layouts, and styles to find what works best
                    **How to use results:** Post each variation and compare engagement metrics
                    """)

                with gr.Column():
                    batch_output = gr.Gallery(
                        label="Generated Test Variations",
                        columns=2,
                        height="auto"
                    )
                    batch_status = gr.Textbox(
                        label="Variation Details", lines=6, interactive=False)
                    with gr.Accordion("📊 A/B Testing Guide", open=False):
                        gr.Markdown("""
                        **Step 1:** Generate variations above
                        **Step 2:** Post each variation to your platform
                        **Step 3:** Track these metrics for each:
                        - Engagement rate (likes, comments, shares)
                        - Click-through rate (if applicable)
                        - Reach and impressions
                        - Save/bookmark rate

                        **Step 4:** Use the best performer for future content

                        **💡 Pro Tips:**
                        - Test one element at a time for clear results
                        - Run tests for at least 7 days
                        - Use the same posting time and hashtags
                        - You need 1000+ views per variation for statistical significance
                        """)

        # Social Media Tab
        with gr.TabItem("📱 Social Media Pack"):
            gr.Markdown("""
            ### Generate Platform-Optimized Images
            Create perfectly sized images for multiple social media platforms at once.
            """)
            with gr.Row():
                with gr.Column():
                    social_prompt = gr.Textbox(
                        label="Content Prompt",
                        placeholder="Describe your social media content...\nExample: Exciting announcement for new product launch",
                        lines=3
                    )
                    social_platforms = gr.CheckboxGroup(
                        choices=[
                            ("Instagram Post (1080x1080)", "instagram_post"),
                            ("Instagram Story (1080x1920)", "instagram_story"),
                            ("Twitter Post (1200x675)", "twitter_post"),
                            ("LinkedIn Post (1200x1200)", "linkedin_post"),
                            ("Facebook Cover (1200x630)", "facebook_cover"),
                            ("YouTube Thumbnail (1280x720)", "youtube_thumbnail")
                        ],
                        value=["instagram_post", "twitter_post"],
                        label="Select Platforms",
                        info="Each platform will get an optimized image"
                    )
                    social_steps = gr.Slider(
                        10, 100, 50,
                        label="Quality (Inference Steps)"
                    )
                    social_btn = gr.Button(
                        "📱 Generate Social Pack", variant="primary", size="lg")

                with gr.Column():
                    social_output = gr.Gallery(
                        label="Platform-Optimized Images",
                        columns=2,
                        height="auto"
                    )
                    social_status = gr.Textbox(
                        label="Status", lines=4, interactive=False)

        with gr.TabItem("🤖 AI Prompt Assistant"):
            # Mobile-friendly header
            with gr.Column():
                gr.Markdown("### 🤖 AI-Powered Prompt Creation")
                with gr.Accordion("💡 How This Works", open=False):
                    gr.Markdown("""
                    **Simple 3-step process:**
                    1. Describe what you want in plain English
                    2. AI creates an optimized prompt
                    3. Generate your professional image
                    """)

            # Main content - responsive layout
            with gr.Row():
                # Left column - Input section
                with gr.Column(scale=1, min_width=300):
                    ai_user_input = gr.Textbox(
                        label="What do you want to create?",
                        placeholder="Example: A hero image for my new eco-friendly water bottle product launch",
                        lines=4,
                        info="Describe your vision in plain language"
                    )

                    # Mobile-friendly dropdown grid
                    with gr.Group():
                        gr.Markdown("#### Settings")
                        # Stack dropdowns vertically on mobile
                        ai_context = gr.Dropdown(
                            choices=[
                                ("General Marketing", "marketing"),
                                ("Product Photography", "product"),
                                ("Social Media Post", "social"),
                                ("Blog/Article Header", "blog"),
                                ("Event Promotion", "event"),
                                ("Brand Identity", "brand")
                            ],
                            value="marketing",
                            label="Content Type",
                            info="What are you creating?"
                        )
                        ai_style = gr.Dropdown(
                            choices=[
                                ("Professional", "professional"),
                                ("Playful & Fun", "playful"),
                                ("Minimalist", "minimalist"),
                                ("Luxury", "luxury"),
                                ("Tech/Modern", "tech"),
                                ("Natural/Organic", "natural")
                            ],
                            value="professional",
                            label="Style",
                            info="What mood to convey?"
                        )
                        ai_platform = gr.Dropdown(
                            choices=[
                                ("General Use", "general"),
                                ("Instagram", "instagram"),
                                ("Twitter/X", "twitter"),
                                ("LinkedIn", "linkedin"),
                                ("Facebook", "facebook"),
                                ("Website Hero", "website")
                            ],
                            value="general",
                            label="Platform",
                            info="Where will this be used?"
                        )

                    # Generate button - full width
                    ai_generate_btn = gr.Button(
                        "🤖 Generate AI Prompt",
                        variant="primary",
                        size="lg",
                        scale=1
                    )

                    # Quick examples - collapsible on mobile
                    with gr.Accordion("💭 Example Ideas", open=False):
                        gr.Examples(
                            examples=[
                                ["A hero image for my new eco-friendly water bottle", "product", "natural", "website"],
                                ["Announcement for our Black Friday sale", "social", "playful", "instagram"],
                                ["Professional headshots for company about page", "marketing", "professional", "linkedin"],
                                ["Blog header about AI in marketing", "blog", "tech", "general"],
                                ["Product showcase for luxury watch collection", "product", "luxury", "instagram"]
                            ],
                            inputs=[ai_user_input, ai_context, ai_style, ai_platform],
                            label=None
                        )

                # Right column - Output section
                with gr.Column(scale=1, min_width=300):
                    ai_generated_prompt = gr.Textbox(
                        label="AI-Generated Prompt",
                        lines=6,
                        interactive=True,
                        info="Edit this prompt if needed"
                    )

                    ai_status = gr.Textbox(
                        label="Status",
                        lines=2,
                        interactive=False
                    )

                    # Action buttons - stack on mobile
                    with gr.Row():
                        ai_use_prompt_btn = gr.Button(
                            "🎨 Generate Image",
                            variant="primary",
                            scale=2
                        )
                        ai_save_prompt_btn = gr.Button(
                            "💾 Save to Single Tab",
                            variant="secondary",
                            scale=1
                        )

                    # Advanced options - collapsed by default
                    with gr.Accordion("🔧 Advanced Prompt Refinement", open=False):
                        ai_improvement_request = gr.Textbox(
                            label="How to improve this prompt?",
                            placeholder="Example: Add more dramatic lighting, make it more colorful, include people",
                            lines=2
                        )
                        ai_improve_btn = gr.Button(
                            "✨ Improve Prompt",
                            variant="secondary",
                            size="sm"
                        )

                    # Preview image
                    ai_preview_image = gr.Image(
                        label="Generated Image Preview",
                        type="filepath",
                        visible=False,
                        height=300
                    )

            # Bottom tips section - full width
            with gr.Accordion("🎯 Pro Tips for Better Results", open=False):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("""
                        **Be Specific About:**
                        - **Subject**: What's the main focus?
                        - **Setting**: Where is it happening?
                        - **Mood**: What feeling to convey?
                        - **Colors**: Any specific palette?
                        """)
                    with gr.Column():
                        gr.Markdown("""
                        **Good Examples:**
                        - ✅ "Minimalist product photo of smartphone on marble"
                        - ✅ "Vibrant Instagram post for summer sale"
                        - ❌ "Product photo" (too vague)
                        - ❌ "Social media post" (not specific)
                        """)

    # Footer
    gr.Markdown("""
    ---
    ### 🛠️ Powered by:
    - **Flux AI Model** - State-of-the-art image generation
    - **Modal Labs** - GPU infrastructure
    - **MCP Protocol** - Tool integration
    - **Gradio** - User interface

    Made with ❤️ for content creators and marketers
    """)

    # Event handlers
    single_btn.click(
        single_image_generation,
        inputs=[single_prompt, single_steps, single_style],
        outputs=[single_output, single_status]
    )

    batch_btn.click(
        enhanced_batch_generation,
        inputs=[batch_prompt, batch_variation_type, batch_count, batch_steps],
        outputs=[batch_output, batch_status]
    )
    batch_variation_type.change(
        update_strategy_info,
        inputs=[batch_variation_type],
        outputs=[strategy_info]
    )

    social_btn.click(
        social_media_generation,
        inputs=[social_prompt, social_platforms, social_steps],
        outputs=[social_output, social_status]
    )

    def generate_ai_prompt(user_input, context, style, platform):
        """Generate an optimized prompt using AI"""
        if not marketing_tool.is_connected:
            return "", "⚠️ MCP Server not connected. Please wait a few seconds and try again."

        if not user_input.strip():
            return "", "⚠️ Please describe what you want to create."

        try:
            request_id = f"ai_prompt_{time.time()}"
            marketing_tool.request_queue.put((
                "generate_prompt_with_ai",
                {
                    "user_input": user_input,
                    "context": context,
                    "style": style,
                    "platform": platform
                },
                request_id
            ))
            status, result = wait_for_result(request_id, timeout=60)
            if status == "success":
                result_data = json.loads(result)
                if result_data.get("success"):
                    return result_data["prompt"], "✅ AI prompt generated successfully!"
                else:
                    return result_data.get("fallback_prompt", ""), f"⚠️ Using fallback prompt: {result_data.get('error', 'Unknown error')}"
            else:
                return "", f"❌ Error: {result}"
        except Exception as e:
            return "", f"❌ Error: {str(e)}"

    ai_generate_btn.click(
        generate_ai_prompt,
        inputs=[ai_user_input, ai_context, ai_style, ai_platform],
        outputs=[ai_generated_prompt, ai_status]
    )

    def improve_ai_prompt(current_prompt, improvement_request):
        if not marketing_tool.is_connected:
            return current_prompt, "⚠️ MCP Server not connected."
        if not current_prompt.strip():
            return "", "⚠️ No prompt to improve. Generate one first."
        if not improvement_request.strip():
            return current_prompt, "⚠️ Please describe how you'd like to improve the prompt."
        try:
            enhanced_base = f"{current_prompt}. {improvement_request}"
            request_id = f"improve_prompt_{time.time()}"
            marketing_tool.request_queue.put((
                "enhance_prompt_with_details",  # Use the same tool
                {
                    "base_prompt": enhanced_base,
                    "enhancement_type": "detailed"
                },
                request_id
            ))
            status, result = wait_for_result(request_id, timeout=60)
            if status == "success":
                if not result:
                    return current_prompt, "⚠️ Received empty response from server."
                try:
                    result_data = json.loads(result)
                    if result_data.get("success"):
                        return result_data["enhanced_prompt"], "✅ Prompt improved successfully!"
                    else:
                        return current_prompt, f"⚠️ Could not improve prompt: {result_data.get('error', 'Unknown error')}"
                except json.JSONDecodeError as json_error:
                    print(f"JSON decode error: {json_error}")
                    print(f"Raw result: {repr(result)}")
                    return result if result else current_prompt, "✅ Prompt improved (received as text)!"

            else:
                return current_prompt, f"❌ Error: {result}"

        except Exception as e:
            print(f"Exception in improve_ai_prompt: {str(e)}")
            return current_prompt, f"❌ Error: {str(e)}"

    ai_improve_btn.click(
        improve_ai_prompt,
        inputs=[ai_generated_prompt, ai_improvement_request],
        outputs=[ai_generated_prompt, ai_status]
    )

    def generate_image_from_ai_prompt(prompt, show_preview=True):
        if not prompt.strip():
            return None, "⚠️ Please generate a prompt first."
        image_path, status = single_image_generation(prompt, 50, "none")
        if show_preview and image_path:
            return gr.update(value=image_path, visible=True), status
        else:
            return gr.update(visible=False), status

    ai_use_prompt_btn.click(
        lambda prompt: generate_image_from_ai_prompt(prompt, True),
        inputs=[ai_generated_prompt],
        outputs=[ai_preview_image, ai_status]
    )
    ai_save_prompt_btn.click(
        lambda prompt: (prompt, "✅ Prompt copied to Single Image tab!"),
        inputs=[ai_generated_prompt],
        outputs=[single_prompt, ai_status]
    ).then(
        lambda: gr.update(selected="🖼️ Single Image"),
        outputs=[]
    )

    # Update connection status
    def update_connection_status():
        if marketing_tool.is_connected:
            return "✅ **Connected to MCP Server** - Ready to generate!"
        else:
            return "🔄 Connecting to MCP server... (please wait)"

    # Periodic status update
    demo.load(update_connection_status, outputs=[connection_status])

if __name__ == "__main__":
    print("Starting Marketing Content Generator...")
    print("Please wait for MCP server to initialize...")
    start_mcp_server()
    time.sleep(5)
    print("Launching Gradio interface...")
    demo.launch(share=False, mcp_server=True)
mcp_server.py
ADDED
@@ -0,0 +1,639 @@
1 |
+
import os
|
2 |
+
import base64
|
3 |
+
import requests
|
4 |
+
import asyncio
|
5 |
+
import aiohttp
|
6 |
+
import json
|
7 |
+
from datetime import datetime
|
8 |
+
from typing import List, Dict
|
9 |
+
import zipfile
|
10 |
+
from io import BytesIO
|
11 |
+
from PIL import Image
|
12 |
+
from mcp.server.fastmcp import FastMCP
|
13 |
+
|
14 |
+
mcp = FastMCP("modal_flux_testing", timeout=500)
|
15 |
+
|
16 |
+
# Configuration
|
17 |
+
MODAL_API_URL = "https://rajputvansh4391--flux-api-server-fastapi-server.modal.run"
|
18 |
+
# Add Mistral API configuration
|
19 |
+
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY", "DRdpSRosSmwjj62cbdb3b04xjaPrMKpa")
|
20 |
+
MISTRAL_API_URL = "https://api.mistral.ai/v1/chat/completions"
|
21 |
+
|
22 |
+
# Social Media Size Presets
|
23 |
+
SIZE_PRESETS = {
|
24 |
+
"instagram_post": (1080, 1080),
|
25 |
+
"instagram_story": (1080, 1920),
|
26 |
+
"twitter_post": (1200, 672),
|
27 |
+
"linkedin_post": (1200, 1200),
|
28 |
+
"facebook_cover": (1200, 632),
|
29 |
+
"youtube_thumbnail": (1280, 720),
|
30 |
+
}
|
31 |
+
|
32 |
+
# Prompt Templates for Marketing
|
33 |
+
# Updated Prompt Templates for Cinematic Marketing (based on your examples)
|
34 |
+
PROMPT_TEMPLATES = {
|
35 |
+
"product_hero": """A professional, cinematic product photography composition featuring {product} as the center subject against a {background} backdrop. The scene is illuminated with dramatic studio lighting creating golden highlights and deep shadows. The {product} appears to float with ethereal light emanating from beneath, surrounded by subtle glowing particles and atmospheric mist. The composition uses cinematic depth of field with the product razor-sharp in focus while the background fades into artistic bokeh. Luxurious materials and textures are emphasized with photorealistic detail, showcasing premium quality and craftsmanship.""",
|
36 |
+
|
37 |
+
"social_announcement": """A high-energy, cinematic social media poster announcing {announcement}. The composition features bold, dramatic lighting with vibrant neon glows and electric energy crackling through the frame. Dynamic typography with the announcement text appears in luxurious, impactful metallic font that seems to emerge from the composition. The backdrop features a futuristic cityscape at night with towering skyscrapers surrounded by colorful LED light strips. Atmospheric elements include flowing energy streams, glowing particles, and lens flares that create a sense of excitement and urgency. The entire poster pulses with kinetic energy and modern sophistication.""",
|
38 |
+
|
39 |
+
"blog_header": """An elegant, cinematic header image for a blog post about {topic}. The composition features ethereal lighting with soft golden hour illumination casting dramatic shadows across the scene. In the foreground, symbolic elements related to {topic} are artistically arranged with professional depth of field. The background dissolves into atmospheric mist with subtle bokeh and floating light particles. The color palette consists of warm golds, deep purples, and rich earth tones. The entire composition exudes intellectual sophistication and visual storytelling, with magazine-quality photography aesthetics.""",
|
40 |
+
|
41 |
+
"team_photo": """A cinematic corporate team photograph showing {description} in a modern, luxurious office environment. The scene is lit with dramatic architectural lighting, featuring large floor-to-ceiling windows with natural light streaming in, creating beautiful rim lighting around the subjects. The team is positioned dynamically across multiple levels of the space, with some standing and others seated in premium furniture. The background showcases sleek modern architecture with glass, steel, and wood elements. Professional color grading gives the image a premium, magazine-worthy aesthetic with rich contrasts and warm undertones.""",
|
42 |
+
|
43 |
+
"event_banner": """A spectacular, cinematic event banner for {event} with movie poster-level production value. The composition features epic scale with dramatic perspective and atmospheric depth. Bold, metallic event typography dominates the upper portion with luxurious, impactful font treatment that appears to be forged from light itself. The scene is filled with dynamic elements: swirling energy, floating particles, dramatic spotlights cutting through atmospheric haze, and architectural elements that frame the composition. The color palette uses deep blues, electric purples, and gold accents to create excitement and grandeur.""",
|
44 |
+
|
45 |
+
"testimonial_bg": """An abstract, cinematic background for testimonial content featuring {mood} aesthetic. The composition uses flowing, organic shapes with ethereal lighting effects creating depth and movement. Subtle geometric patterns emerge from atmospheric mist while soft, diffused lighting creates beautiful gradients across the frame. The scene includes floating elements like delicate particles, soft bokeh, and gentle light rays that add visual interest without overwhelming the testimonial text. The color palette is sophisticated and calming, using gradient transitions between complementary colors to create emotional resonance.""",
|
46 |
+
|
47 |
+
"poster_style": """A cinematic movie poster composition featuring {subject} with dramatic, high-impact visual storytelling. The scene is dominated by theatrical lighting with bold contrasts between light and shadow. The main subject is positioned using classical composition rules with supporting elements arranged to guide the eye. Atmospheric elements include swirling mist, dramatic sky, glowing magical effects, and rich environmental details. The composition features layered depth with foreground, midground, and background elements all contributing to the narrative. Typography space is reserved for impactful text placement with the overall mood being {mood}.""",
|
48 |
+
|
49 |
+
"luxury_product": """An ultra-premium product showcase featuring {product} in a luxurious, museum-quality presentation. The item sits on pristine surfaces with perfect reflections, surrounded by architectural elements like marble, gold accents, and crystal. Dramatic lighting creates spectacular highlights and deep shadows, emphasizing every detail and texture. The background features elegant negative space with subtle gradient lighting and floating particles that suggest exclusivity. The entire composition exudes opulence and sophistication, with photorealistic detail that showcases premium craftsmanship and materials."""
|
50 |
+
}
|
51 |
+
|
52 |
+
# Enhanced Style Modifiers (matching your cinematic examples)
STYLE_MODIFIERS = {
    "professional": """professional studio lighting, cinematic composition, dramatic shadows and highlights, premium materials and textures, photorealistic detail, magazine-quality photography, sophisticated color grading, architectural precision, corporate elegance, high-end commercial aesthetics""",

    "playful": """vibrant neon colors, electric energy crackling through the frame, dynamic movement and flow, glowing particles and magical elements, whimsical floating objects, rainbow light effects, kinetic energy, joyful atmosphere, colorful light strips and LED effects, fun and energetic composition""",

    "minimalist": """clean geometric composition, pristine white negative space, single dramatic light source, subtle shadows and highlights, elegant simplicity, floating elements with perfect spacing, monochromatic or limited color palette, architectural precision, zen-like tranquility, museum-quality presentation""",

    "luxury": """opulent materials like gold, marble, and crystal, dramatic chiaroscuro lighting, rich textures and reflections, premium craftsmanship details, sophisticated color palette of deep jewel tones, elegant architectural elements, museum-quality presentation, exclusive atmosphere, metallic accents and flowing fabrics""",

    "tech": """futuristic neon lighting with electric blue and cyan glows, holographic interfaces and digital elements, sleek metallic surfaces with perfect reflections, floating geometric shapes, matrix-style digital rain effects, cyberpunk aesthetic, glowing circuit patterns, high-tech laboratory environment, innovative and cutting-edge atmosphere""",

    "cinematic": """movie poster lighting with dramatic spotlights, atmospheric haze and volumetric fog, epic scale and perspective, rich color grading with deep contrasts, cinematic depth of field, theatrical composition, dramatic sky and environmental elements, professional film-quality aesthetics, storytelling through visual elements""",

    "mystical": """ethereal lighting with soft, magical glows, floating particles and sparkles, misty atmospheric effects, enchanted forest or temple environment, glowing runes and magical symbols, otherworldly color palette of purples and golds, mysterious shadows and light rays, fantasy movie aesthetic, ancient and magical atmosphere""",

    "editorial": """magazine-quality photography lighting, sophisticated composition following rule of thirds, professional color grading, high fashion aesthetic, dramatic contrasts, premium materials and styling, architectural or natural backgrounds, artistic depth of field, editorial sophistication, contemporary visual storytelling"""
}

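# Illustrative sketch (not from the original file): a modifier is simply appended to a
# base prompt, e.g.
#
#     styled = f"Wireless headphones product shot, {STYLE_MODIFIERS['luxury']}"
#
# The add_style_modifier tool further below wraps exactly this kind of concatenation.
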
VARIATION_STRATEGIES = {
    "color_schemes": [
        "with warm colors (reds, oranges, yellows)",
        "with cool colors (blues, greens, purples)",
        "with bold, high-contrast colors",
        "with muted, pastel colors",
        "monochromatic color scheme"
    ],
    "composition_styles": [
        "centered composition with symmetrical balance",
        "rule of thirds composition with dynamic flow",
        "minimalist composition with lots of white space",
        "busy, detailed composition with multiple elements",
        "close-up, focused composition"
    ],
    "emotional_tones": [
        "energetic and exciting mood",
        "calm and peaceful atmosphere",
        "professional and trustworthy feel",
        "fun and playful vibe",
        "luxurious and premium aesthetic"
    ],
    "visual_styles": [
        "photorealistic style",
        "illustrated/graphic design style",
        "vintage/retro aesthetic",
        "modern/contemporary look",
        "artistic/creative approach"
    ],
    "lighting_moods": [
        "bright, well-lit scene",
        "dramatic lighting with shadows",
        "soft, diffused lighting",
        "golden hour warm lighting",
        "studio lighting setup"
    ]
}

# Content creator specific variations
CONTENT_CREATOR_VARIATIONS = {
    "social_media": [
        "Instagram-optimized with bold text overlay space",
        "TikTok-style with vertical focus and trending elements",
        "LinkedIn professional with corporate aesthetic",
        "YouTube thumbnail with clickable visual hierarchy",
        "Twitter-friendly with clear, readable elements"
    ],
    "engagement_hooks": [
        "with eye-catching focal point in center",
        "with contrasting element to grab attention",
        "with human faces or eyes for connection",
        "with bright colors that pop in feeds",
        "with intriguing visual question or mystery"
    ],
    "brand_positioning": [
        "premium/luxury brand positioning",
        "affordable/accessible brand feel",
        "innovative/cutting-edge brand image",
        "trustworthy/established brand look",
        "fun/approachable brand personality"
    ]
}

# In-memory storage for history (in production, use a database)
generation_history = []

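# Each entry appended by generate_and_save_image (see below) is a dict of the form
# {"prompt": ..., "timestamp": ..., "dimensions": "WxH", "image_base64": <truncated preview>},
# so the history stays lightweight enough to keep in memory for the life of the process.
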
@mcp.tool()
async def generate_prompt_with_ai(user_input: str, context: str = "marketing", style: str = "professional", platform: str = "general") -> str:
    """
    Use Mistral AI to generate optimized prompts for Flux image generation.

    Args:
        user_input: What the user wants to create
        context: The context (marketing, product, social, etc.)
        style: The desired style
        platform: Target platform (instagram, twitter, etc.)

    Returns:
        An optimized prompt for Flux
    """
    try:
        system_prompt = """You are an expert prompt engineer specializing in creating detailed, cinematic prompts for Flux AI image generation.

Your prompts should be like movie poster descriptions - highly detailed, vivid, and cinematic. Study these examples:

GOOD EXAMPLES:
- "minimal poster featuring multiple hands coming out of frame holding a globe as the center subject. there is a miniature world on top of the globe, with happy miniature people and lush green trees. the word 'EARTH' appears above the main subject, with a luxurious, impactful font. the backdrop features a minimalistic galaxy background with glowing stars. the main subject is well lit and the poster is vividly colorful."

- "A high-energy, cinematic movie poster capturing an intense, high-stakes race between Sonic the Hedgehog and a cheetah, set against the vast African savannah at sunset. The poster features bold, dramatic lighting, with the golden glow of the setting sun casting long shadows as both competitors blur across the landscape, dust swirling behind them."

KEY REQUIREMENTS:
1. Be extremely detailed and descriptive
2. Include specific lighting details (dramatic lighting, golden glow, well lit, etc.)
3. Describe the composition and framing
4. Add cinematic and poster-like qualities
5. Include color palette descriptions
6. Mention text placement if relevant
7. Add atmospheric details (mist, smoke, glowing elements)
8. Keep under 200 words but pack in maximum detail
9. Use vivid, cinematic language
10. Focus on visual storytelling

Generate prompts that sound like professional movie poster or advertisement descriptions."""

        user_message = f"""Create a detailed, cinematic prompt for Flux AI based on:

User Request: {user_input}
Context: {context}
Style: {style}
Platform: {platform}

Make it sound like a professional movie poster description with rich visual details, specific lighting, composition, and atmospheric elements. Keep it under 200 words but extremely detailed and vivid."""

        headers = {
            "Authorization": f"Bearer {MISTRAL_API_KEY}",
            "Content-Type": "application/json"
        }

        payload = {
            "model": "mistral-large-latest",
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_message}
            ],
            "temperature": 0.8,  # Higher temperature for more creative, varied prompts
            "max_tokens": 250  # Leaves room for a detailed description near the 200-word cap
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(
                MISTRAL_API_URL,
                headers=headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=30)
            ) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise Exception(f"Mistral API error ({response.status}): {error_text}")

                result = await response.json()
                generated_prompt = result['choices'][0]['message']['content']

        # Ensure the prompt is under 200 words
        word_count = len(generated_prompt.split())
        if word_count > 200:
            # Truncate while preserving sentence structure
            words = generated_prompt.split()
            truncated = ' '.join(words[:190])
            # Find the last complete sentence
            last_period = truncated.rfind('.')
            if last_period > 100:  # Only truncate if we have a reasonable amount
                generated_prompt = truncated[:last_period + 1]

        return json.dumps({
            "success": True,
            "prompt": generated_prompt,
            "user_input": user_input,
            "context": context,
            "style": style,
            "word_count": len(generated_prompt.split())
        })

    except Exception as e:
        return json.dumps({
            "success": False,
            "error": str(e),
            "fallback_prompt": f"Cinematic {style} style image of {user_input}, dramatic lighting, high detail, professional composition, vivid colors"
        })

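# Illustrative sketch (not part of the original file): the tool returns a JSON string,
# so a caller is expected to parse it and fall back gracefully on failure, e.g.
#
#     raw = await generate_prompt_with_ai("eco-friendly water bottle launch")
#     data = json.loads(raw)
#     prompt = data["prompt"] if data["success"] else data["fallback_prompt"]
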
# Additional function to improve prompt quality based on your examples
@mcp.tool()
async def enhance_prompt_with_details(base_prompt: str, enhancement_type: str = "cinematic") -> str:
    """
    Enhance a basic prompt with detailed visual elements like your examples.

    Args:
        base_prompt: The basic prompt to enhance
        enhancement_type: Type of enhancement (cinematic, poster, product, etc.)

    Returns:
        Enhanced detailed prompt
    """
    try:
        system_prompt = """You are an expert at enhancing image prompts with rich visual details. Take the basic prompt and transform it into a highly detailed, cinematic description like a movie poster or professional advertisement.

Add elements like:
- Specific lighting (dramatic, golden glow, well lit, ethereal light)
- Composition details (center subject, background elements, framing)
- Atmospheric elements (mist, smoke, glowing stars, dust swirling)
- Color palette descriptions
- Texture and material details
- Cinematic qualities and mood
- Professional photography/poster qualities

Keep the enhanced prompt under 200 words but extremely detailed."""

        user_message = f"""Enhance this basic prompt with rich visual details:

Basic Prompt: {base_prompt}
Enhancement Type: {enhancement_type}

Transform it into a detailed, cinematic description with specific lighting, composition, atmosphere, and visual storytelling elements."""

        headers = {
            "Authorization": f"Bearer {MISTRAL_API_KEY}",
            "Content-Type": "application/json"
        }

        payload = {
            "model": "mistral-large-latest",
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_message}
            ],
            "temperature": 0.8,
            "max_tokens": 250
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(
                MISTRAL_API_URL,
                headers=headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=30)
            ) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise Exception(f"Mistral API error ({response.status}): {error_text}")

                result = await response.json()
                enhanced_prompt = result['choices'][0]['message']['content']

        # Ensure under 200 words
        word_count = len(enhanced_prompt.split())
        if word_count > 200:
            words = enhanced_prompt.split()
            truncated = ' '.join(words[:190])
            last_period = truncated.rfind('.')
            if last_period > 100:
                enhanced_prompt = truncated[:last_period + 1]

        return json.dumps({
            "success": True,
            "original_prompt": base_prompt,
            "enhanced_prompt": enhanced_prompt,
            "word_count": len(enhanced_prompt.split())
        })

    except Exception as e:
        return json.dumps({
            "success": False,
            "error": str(e),
            "original_prompt": base_prompt
        })

@mcp.tool()
async def generate_and_save_image(prompt: str, num_inference_steps: int = 50, width: int = 1024, height: int = 1024) -> str:
    """Generate a single image with specified dimensions"""
    try:
        print(f"Sending request to Modal API: {prompt} at {width}x{height}")
        payload = {
            "prompt": prompt,
            "num_inference_steps": num_inference_steps,
            "width": width,
            "height": height
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"{MODAL_API_URL}/generate",
                json=payload,
                timeout=aiohttp.ClientTimeout(total=120)
            ) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise Exception(f"Modal API error ({response.status}): {error_text}")

                result = await response.text()
                result_json = json.loads(result)

        if 'image_base64' in result_json:
            image_b64 = result_json['image_base64']

            # Store in history
            generation_history.append({
                "prompt": prompt,
                "timestamp": datetime.now().isoformat(),
                "dimensions": f"{width}x{height}",
                "image_base64": image_b64[:100] + "..."  # Store preview only
            })

            return image_b64
        else:
            raise Exception("No 'image_base64' key found in response")

    except Exception as e:
        print(f"Error in generate_and_save_image: {str(e)}")
        raise Exception(f"Error generating image: {str(e)}")

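# Illustrative sketch (not part of the original file): the tool returns the raw base64
# string, so a caller can turn it back into an image with the standard library plus
# Pillow, e.g.
#
#     import base64, io
#     from PIL import Image
#     img = Image.open(io.BytesIO(base64.b64decode(image_b64)))
#     img.save("asset.png")
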
@mcp.tool()
async def batch_generate_smart_variations(prompt: str, count: int = 3, variation_type: str = "mixed", num_inference_steps: int = 50, width: int = 1024, height: int = 1024) -> str:
    """
    Generate multiple meaningful variations for A/B testing content.

    variation_type options:
    - "mixed": Different strategies (recommended for general testing)
    - "color_schemes": Test different color approaches
    - "composition_styles": Test different layouts
    - "emotional_tones": Test different moods
    - "social_media": Platform-optimized variations
    - "engagement_hooks": Test attention-grabbing elements
    - "brand_positioning": Test different brand feels
    """
    if count > 5:
        count = 5

    if variation_type == "mixed":
        # Mix different strategies for comprehensive testing
        all_variations = []
        for strategy in VARIATION_STRATEGIES.values():
            all_variations.extend(strategy[:2])  # Take 2 from each strategy

        # Add content creator specific ones
        all_variations.extend(CONTENT_CREATOR_VARIATIONS["engagement_hooks"][:2])

        # Randomly select variations
        import random
        selected_variations = random.sample(all_variations, min(count, len(all_variations)))

    elif variation_type in VARIATION_STRATEGIES:
        selected_variations = VARIATION_STRATEGIES[variation_type][:count]
    elif variation_type in CONTENT_CREATOR_VARIATIONS:
        selected_variations = CONTENT_CREATOR_VARIATIONS[variation_type][:count]
    else:
        # Fallback to mixed
        selected_variations = [
            "with vibrant, attention-grabbing colors",
            "with professional, clean aesthetic",
            "with bold, dramatic composition"
        ][:count]

    results = []

    for i, variation in enumerate(selected_variations):
        enhanced_prompt = f"{prompt}, {variation}"

        try:
            print(f"Generating variation {i+1}/{count}: {variation}")
            image_b64 = await generate_and_save_image(enhanced_prompt, num_inference_steps, width, height)

            results.append({
                "index": i,
                "variation_description": variation,
                "full_prompt": enhanced_prompt,
                "dimensions": f"{width}x{height}",
                "image_base64": image_b64,
                "testing_purpose": get_testing_purpose(variation)
            })

        except Exception as e:
            print(f"Error generating variation {i+1}: {str(e)}")

    return json.dumps({
        "images": results,
        "count": len(results),
        "variation_type": variation_type,
        "testing_strategy": get_testing_strategy(variation_type)
    })

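# Illustrative sketch (not part of the original file): a typical A/B-testing call picks one
# strategy and then reads the per-variation metadata out of the returned JSON, e.g.
#
#     raw = await batch_generate_smart_variations(
#         "Summer sale announcement for a coffee brand",
#         count=3,
#         variation_type="color_schemes",
#     )
#     for img in json.loads(raw)["images"]:
#         print(img["variation_description"], "->", img["testing_purpose"])
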
def get_testing_purpose(variation: str) -> str:
    """Get the testing purpose for a variation"""
    if "warm colors" in variation or "cool colors" in variation:
        return "Test color psychology impact on engagement"
    elif "centered" in variation or "rule of thirds" in variation:
        return "Test composition impact on visual flow"
    elif "energetic" in variation or "calm" in variation:
        return "Test emotional response and brand perception"
    elif "Instagram" in variation or "TikTok" in variation:
        return "Test platform-specific optimization"
    elif "premium" in variation or "affordable" in variation:
        return "Test brand positioning and target audience appeal"
    elif "eye-catching" in variation or "contrasting" in variation:
        return "Test attention-grabbing effectiveness"
    else:
        return "Test visual style preference"


def get_testing_strategy(variation_type: str) -> str:
    """Get testing strategy explanation"""
    strategies = {
        "mixed": "Comprehensive A/B test across multiple variables to identify best overall approach",
        "color_schemes": "Test how different colors affect engagement and emotional response",
        "composition_styles": "Test how layout affects visual hierarchy and user attention",
        "emotional_tones": "Test which mood resonates best with your target audience",
        "social_media": "Test platform-specific optimizations for maximum reach",
        "engagement_hooks": "Test attention-grabbing elements for better click-through rates",
        "brand_positioning": "Test how different brand feels affect audience perception"
    }
    return strategies.get(variation_type, "Test different approaches to optimize content performance")

@mcp.tool()
async def generate_ab_test_report_template(variations_data: str) -> str:
    """Generate a template for tracking A/B test results"""
    data = json.loads(variations_data)

    report_template = {
        "test_name": "Content Variation A/B Test",
        "test_date": datetime.now().isoformat(),
        "variation_type": data.get("variation_type", "mixed"),
        "testing_strategy": data.get("testing_strategy", ""),
        "variations": [],
        "metrics_to_track": [
            "Impressions",
            "Engagement Rate (%)",
            "Click-through Rate (%)",
            "Saves/Shares",
            "Comments",
            "Conversion Rate (%)"
        ],
        "recommended_test_duration": "7-14 days for statistical significance",
        "sample_size_needed": "Minimum 1000 impressions per variation"
    }

    for img in data.get("images", []):
        report_template["variations"].append({
            "variation_id": f"V{img['index'] + 1}",
            "description": img["variation_description"],
            "testing_purpose": img["testing_purpose"],
            "results": {
                "impressions": 0,
                "engagement_rate": 0,
                "ctr": 0,
                "saves": 0,
                "comments": 0,
                "conversion_rate": 0
            },
            "notes": ""
        })

    return json.dumps(report_template, indent=2)

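# Illustrative sketch (not part of the original file): the report template is designed to be
# fed directly from batch_generate_smart_variations, e.g.
#
#     variations_json = await batch_generate_smart_variations("New app launch teaser", count=3)
#     report = await generate_ab_test_report_template(variations_json)
#     print(report)  # pre-filled tracking sheet with one entry per variation
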
@mcp.tool()
async def batch_generate_images(prompt: str, count: int = 3, num_inference_steps: int = 50, width: int = 1024, height: int = 1024) -> str:
    """
    Generate multiple images with smart variations for A/B testing.
    Uses meaningful variations instead of identical images.
    """
    return await batch_generate_smart_variations(
        prompt=prompt,
        count=count,
        variation_type="mixed",
        num_inference_steps=num_inference_steps,
        width=width,
        height=height
    )

@mcp.tool()
async def generate_social_media_set(prompt: str, platforms: List[str], num_inference_steps: int = 50) -> str:
    """
    Generate images optimized for different social media platforms with correct resolutions.
    Platforms: instagram_post, instagram_story, twitter_post, linkedin_post, etc.
    """
    results = []

    for platform in platforms:
        if platform in SIZE_PRESETS:
            width, height = SIZE_PRESETS[platform]
            platform_prompt = f"{prompt}, optimized for {platform.replace('_', ' ')}"

            try:
                # Pass the platform's actual dimensions to the generation function
                image_b64 = await generate_and_save_image(
                    platform_prompt,
                    num_inference_steps,
                    width,
                    height
                )
                results.append({
                    "platform": platform,
                    "size": [width, height],
                    "resolution": f"{width}x{height}",
                    "image_base64": image_b64
                })
                print(f"✅ Generated {platform} image at {width}x{height}")
            except Exception as e:
                print(f"Error generating for {platform}: {str(e)}")

    return json.dumps({"results": results})

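# Illustrative sketch (not part of the original file): platform keys must match SIZE_PRESETS
# (defined earlier in this module); unknown platforms are silently skipped, e.g.
#
#     raw = await generate_social_media_set(
#         "Autumn collection lookbook cover",
#         platforms=["instagram_post", "instagram_story", "twitter_post"],
#     )
#     for item in json.loads(raw)["results"]:
#         print(item["platform"], item["resolution"])
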
@mcp.tool()
async def add_style_modifier(prompt: str, style: str) -> str:
    """
    Add style modifiers to ensure brand consistency.
    Styles: professional, playful, minimalist, luxury, tech, cinematic, mystical, editorial
    """
    if style not in STYLE_MODIFIERS:
        return json.dumps({
            "error": f"Style '{style}' not found",
            "available_styles": list(STYLE_MODIFIERS.keys())
        })

    enhanced_prompt = f"{prompt}, {STYLE_MODIFIERS[style]}"
    return json.dumps({
        "original_prompt": prompt,
        "enhanced_prompt": enhanced_prompt,
        "style_applied": style
    })

@mcp.tool()
async def get_generation_history(limit: int = 10) -> str:
    """Get recent generation history for reuse and reference"""
    recent_history = generation_history[-limit:]
    return json.dumps({
        "history": recent_history,
        "total_generations": len(generation_history)
    })

@mcp.tool()
async def create_image_package(image_data_list: List[Dict], package_name: str = "marketing_assets") -> str:
    """
    Create a downloadable package of generated images with metadata.
    Useful for bulk content creation and organization.
    """
    # This would create a zip file with images and a metadata JSON
    # For now, return structured data

    package_info = {
        "package_name": package_name,
        "created_at": datetime.now().isoformat(),
        "total_images": len(image_data_list),
        "images": []
    }

    for idx, data in enumerate(image_data_list):
        package_info["images"].append({
            "filename": f"{package_name}_{idx+1}.png",
            "prompt": data.get("prompt", ""),
            "metadata": data.get("metadata", {})
        })

    return json.dumps(package_info)

@mcp.tool()
async def health_check() -> str:
    """Check if the Modal API server is healthy"""
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(
                f"{MODAL_API_URL}/health",
                timeout=aiohttp.ClientTimeout(total=10)
            ) as response:
                if response.status == 200:
                    result = await response.text()
                    return f"Modal API is healthy: {result}"
                else:
                    return f"Modal API returned status {response.status}"
    except Exception as e:
        return f"Modal API health check failed: {str(e)}"

if __name__ == "__main__":
|
637 |
+
print("Starting Enhanced MCP server for Content Creators & Marketers...")
|
638 |
+
print(f"Modal API URL: {MODAL_API_URL}")
|
639 |
+
mcp.run(transport="stdio")
|
requirements.txt
ADDED
@@ -0,0 +1,18 @@
gradio[mcp]==5.32.1
torch>=2.0.0
torchvision>=0.15.0
diffusers>=0.24.0
transformers>=4.35.0
accelerate>=0.24.0
safetensors>=0.4.0
huggingface_hub>=0.19.0
Pillow>=9.5.0
requests>=2.31.0
aiohttp>=3.9.0
numpy>=1.24.0
sentencepiece>=0.1.99
uvicorn
modal
modal-client
fastapi
nest_asyncio