Automatisation de Génération Vidéo par IA
Contexte du projet
Ce projet a été développé pour automatiser la création de vidéos destinées aux réseaux sociaux (TikTok, Instagram). En utilisant des modèles d'intelligence artificielle hébergés localement, l'objectif était de générer des séquences visuelles à partir de requêtes textuelles tout en réduisant le temps de montage manuel.
L'architecture repose sur un script Python qui interagit directement avec l'API d'un serveur local (comme Runpod ou ComfyUI) pour traiter les lots d'images et les compiler en vidéo.
Extrait du Script d'Appel API
#!/usr/bin/env python3
"""
Générateur de Reels/Shorts complet :
Ollama -> ComfyUI -> Concaténation -> TTS + Sous-titres karaoké mot-à-mot
"""
import os
import sys
import json
import time
import uuid
import asyncio
import requests
from pathlib import Path
from typing import Dict, List, Tuple
import subprocess
import torch
import gc
import edge_tts
from moviepy.editor import (
VideoFileClip,
TextClip,
CompositeVideoClip,
AudioFileClip,
concatenate_videoclips,
)
from moviepy.video.fx import all as vfx
from soustitreollama import *
# ==========================================
# GLOBAL CONFIG
# ==========================================
# 1. Ollama (local LLM server used for script and scene generation)
OLLAMA_MODEL = "gemma3:12b"
OLLAMA_BASE_URL = "http://localhost:11434"
# 2. ComfyUI (local video-generation backend)
COMFY_SERVER = "127.0.0.1:8000"  # host:port only — URLs are built as http://{COMFY_SERVER}/...
COMFY_WORKFLOW_FILE = "workflow_api.json"  # workflow graph exported in ComfyUI "API" format
COMFY_OUTPUT_DIR = r"C:\Users\knipe\Documents\ComfyUI\output\video\hunyuan_video_1.5"
COMFY_PROMPT_NODE_ID = "44"  # id of the node whose "text" input receives the scene prompt
# 3. Video / Audio / Fonts
TTS_VOICE = "en-US-EricNeural"  # edge-tts voice name
FONT_STYLE = r"C:\Code\py\reels\fullocal\reels-qui-marche-\the_bold_font\THEBOLDFONT-FREEVERSION.ttf"
# Fall back to a system font when the bundled TTF is missing (Windows vs Linux).
if not os.path.exists(FONT_STYLE):
    FONT_STYLE = "Impact" if os.name == "nt" else "DejaVuSans-Bold"
# ==========================================
# PARTIE 1 : OLLAMA
# ==========================================
class ReelsGeneratorAPI:
    """Client for a local Ollama server.

    Produces the two text artifacts the pipeline needs:
    a voiceover script and a list of visual scene prompts for ComfyUI.
    """

    def __init__(self, model: str = OLLAMA_MODEL, base_url: str = OLLAMA_BASE_URL):
        self.model = model
        self.base_url = base_url
        self.api_url = f"{base_url}/api/generate"
        # Fail fast (sys.exit) if the server is unreachable.
        self.verify_ollama()

    def verify_ollama(self):
        """Ping /api/tags to confirm the Ollama server is up; exit the process otherwise."""
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
            if response.status_code == 200:
                print(f"✅ Ollama API détecté - Modèle: {self.model}")
            else:
                print("⚠️ Erreur avec l'API Ollama.")
                sys.exit(1)
        except Exception as e:
            print(f"❌ Erreur de connexion à Ollama: {e}")
            sys.exit(1)

    def call_ollama(self, prompt: str, max_tokens: int = 500) -> str:
        """Run one non-streaming generation and return the response text.

        Returns "" on any HTTP or connection failure, so callers always
        receive plain content — never an error marker.
        """
        try:
            payload = {
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": {
                    "num_predict": max_tokens,
                    "temperature": 0.7,
                    "top_p": 0.9,
                },
            }
            response = requests.post(self.api_url, json=payload, timeout=120)
            return response.json().get("response", "").strip() if response.status_code == 200 else ""
        except Exception as e:
            # BUGFIX: the original returned f"Erreur: {e}", which downstream
            # code treated as real content (it could end up narrated in the
            # final video). Log and return "" like the non-200 path instead.
            print(f"⚠️ Erreur lors de l'appel à Ollama: {e}")
            return ""

    def unload_model(self):
        """Ask Ollama to release the model from VRAM before ComfyUI needs the GPU."""
        print(f"\n🧹 Vidage de la VRAM (Déchargement de {self.model})...")
        try:
            payload = {
                "model": self.model,
                "keep_alive": 0,
            }
            # Trick: calling /api/generate with keep_alive=0 forces an unload.
            requests.post(self.api_url, json=payload, timeout=10)
            time.sleep(2)
            print("✅ VRAM d'Ollama libérée avec succès !")
        except Exception as e:
            print(f"⚠️ Erreur lors du vidage de la VRAM: {e}")

    def generate_voiceover_script(self, topic: str) -> str:
        """Write a 45-60 second TikTok/Reels voiceover script about *topic*."""
        print(" 🎙️ Écriture du script vocal...")
        prompt = f"""Act as an expert TikTok/Reels scriptwriter. Write a fast-paced, highly engaging 45-60 second voiceover script about: {topic}.
Structure:
Hook (0-3s): A pattern-interrupting, controversial, or highly relatable opening statement that forces viewers to stop scrolling.
Value/Story (30-40s): Fast, punchy sentences. Deliver immediate value or a compelling narrative. Use simple, conversational language with no fluff.
CTA (5-7s): A clear, low-friction Call to Action (e.g., "Save this for later," "Comment X for the link," or "Follow for part 2").
Tone: Energetic, authentic, and fast-paced.
Formatting: Answer ONLY with the script text. Do not include stage directions or notes.
"""
        return self.call_ollama(prompt, max_tokens=600)

    def generate_video_clips(self, topic: str) -> List[str]:
        """Return up to 8 ComfyUI-ready scene prompts for *topic*.

        Parses a numbered list from the LLM output; falls back to 8 generic
        prompts when fewer than 5 usable lines come back.
        """
        print(" 🎬 Génération des scènes...")
        # BUGFIX: the prompt asked for "exactly 10" scenes while the format
        # example said 8 and the pipeline keeps at most 8 — the instruction
        # now consistently asks for 8.
        prompt = f"""Act as a viral short-form video director. List exactly 8 high-retention visual scenes (roughly 3-6 seconds each) for a TikTok/Reel about: {topic}.
Requirements:
Scene 1 must be visually striking or involve rapid movement to match the hook.
Include quick cuts and dynamic actions (e.g., pointing, screen recordings, text on screen, B-roll with movement).
Keep descriptions concise and highly visual.
Format:
1. [Scene description]
2. [Scene description]
(Continue up to 8)
Answer ONLY with the numbered list. No introductions or explanations.
"""
        clips_raw = self.call_ollama(prompt, max_tokens=500)
        video_prompts = []
        for line in clips_raw.split("\n"):
            line = line.strip()
            # Keep only numbered list lines ("1. ...", "2) ...", etc.).
            if line and any(line.startswith(str(i)) for i in range(1, 10)):
                clean = line.lstrip("0123456789.-*)• ").strip()
                if len(clean) > 10:  # skip stray numbers / empty items
                    video_prompts.append(
                        f"Professional 4K video: {clean}, cinematic lighting, smooth motion"
                    )
        if len(video_prompts) < 5:
            # LLM output unusable — fall back to generic scenes.
            video_prompts = [f"4K video: Dynamic scene about {topic}"] * 8
        return video_prompts[:8]

    def generate_complete_reels(self, user_prompt: str) -> Dict:
        """Generate both artifacts for one reel.

        Returns:
            {"voiceover_script": str, "video_clips": list[str]}
        """
        print(f"🎬 Génération du concept pour: '{user_prompt}'\n")
        voiceover = self.generate_voiceover_script(user_prompt)
        voiceover = voiceover.replace("\n", " ").strip()
        clips = self.generate_video_clips(user_prompt)
        return {
            "voiceover_script": voiceover,
            "video_clips": clips,
        }
# ==========================================
# PARTIE 2 : COMFYUI
# ==========================================
def clear_comfy_output_dir(folder_path):
    """Delete leftover video files from a previous run in *folder_path*.

    Best-effort: files that cannot be removed (locked, permissions) are
    skipped. Non-video files and subdirectories are left untouched, and a
    missing folder is simply ignored.
    """
    video_exts = {".mp4", ".avi", ".mov", ".mkv", ".webm"}
    folder = Path(folder_path)
    if not folder.exists():
        return
    for f in folder.iterdir():
        if f.is_file() and f.suffix.lower() in video_exts:
            try:
                f.unlink()
            except OSError:
                # BUGFIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; keep best-effort semantics
                # but only for OS-level errors.
                pass
def send_prompt_to_comfy(prompt_text: str) -> str:
    """Queue one generation job on the local ComfyUI server.

    Loads the exported API-format workflow graph, injects *prompt_text*
    into the "text" input of the configured prompt node, and POSTs the
    graph to /prompt.

    Returns:
        The server-assigned prompt id (used to poll /history).

    Raises:
        SystemExit: if the workflow file is missing.
        ValueError: if the configured node id is absent from the graph.
        requests.HTTPError: if the server rejects the job.
    """
    workflow_path = Path(COMFY_WORKFLOW_FILE)
    if not workflow_path.exists():
        raise SystemExit(f"Fichier API non trouvé: {COMFY_WORKFLOW_FILE}")
    prompt_graph = json.loads(workflow_path.read_text(encoding="utf-8"))
    if COMFY_PROMPT_NODE_ID not in prompt_graph:
        raise ValueError(f"Node {COMFY_PROMPT_NODE_ID} introuvable dans {COMFY_WORKFLOW_FILE}")
    # Some exported graphs omit "inputs" on pass-through nodes; create it on demand.
    prompt_graph[COMFY_PROMPT_NODE_ID].setdefault("inputs", {})["text"] = prompt_text
    payload = {"prompt": prompt_graph, "client_id": str(uuid.uuid4())}
    # BUGFIX: the original POST had no timeout and could hang forever if the
    # server accepted the connection but never answered.
    r = requests.post(f"http://{COMFY_SERVER}/prompt", json=payload, timeout=30)
    r.raise_for_status()
    return r.json().get("prompt_id")
def wait_for_comfy_job(prompt_id: str, poll_interval: float = 3.0, max_wait=None):
    """Block until ComfyUI reports *prompt_id* in its /history endpoint.

    Args:
        prompt_id: id returned by send_prompt_to_comfy().
        poll_interval: seconds between polls (default 3, as before).
        max_wait: optional overall timeout in seconds; None (the default)
            preserves the original wait-forever behaviour.

    Raises:
        TimeoutError: when max_wait is set and exceeded.
    """
    print(f" ⏳ Attente de ComfyUI (ID: {prompt_id})...")
    deadline = None if max_wait is None else time.monotonic() + max_wait
    while True:
        try:
            # BUGFIX: per-request timeout so a dead server cannot hang a
            # single poll indefinitely.
            r = requests.get(f"http://{COMFY_SERVER}/history/{prompt_id}", timeout=10)
            if r.status_code == 200 and prompt_id in r.json():
                print(" ✅ Clip généré !")
                return
        except requests.exceptions.ConnectionError:
            # Server may still be starting up / loading models; keep polling.
            pass
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError(f"ComfyUI job {prompt_id} not finished after {max_wait}s")
        time.sleep(poll_interval)
# ==========================================
# PARTIE 3 : CONCATÉNATION MOVIEPY
# ==========================================
def concatenate_videos(video_folder_path, output_filename, delete_originals=True):
    """Concatenate every video file in a folder into a single mp4.

    Files are picked up by extension and sorted by name (so ComfyUI's
    sequential filenames preserve scene order), concatenated with MoviePy's
    "compose" method, and written out as H.264/AAC at 24 fps.

    Args:
        video_folder_path: folder containing the generated clips.
        output_filename: path of the combined output video.
        delete_originals: when True (default), source clips are removed
            after processing (best-effort).

    Raises:
        FileNotFoundError: when the folder does not exist.
        Exception: when the folder contains no video files.
    """
    folder = Path(video_folder_path)
    if not folder.exists():
        raise FileNotFoundError(f"Le dossier '{video_folder_path}' n'existe pas.")
    video_extensions = {".mp4", ".avi", ".mov", ".mkv", ".webm"}
    video_files = sorted(
        f for f in folder.iterdir() if f.is_file() and f.suffix.lower() in video_extensions
    )
    if not video_files:
        raise Exception("Aucune vidéo trouvée pour la concaténation.")
    print(f"\nConcaténation de {len(video_files)} clips...")
    clips = [VideoFileClip(str(vf)) for vf in video_files]
    final_clip = None
    try:
        final_clip = concatenate_videoclips(clips, method="compose")
        final_clip.write_videofile(
            output_filename,
            codec="libx264",
            audio_codec="aac",
            fps=24,
            logger=None,
        )
    finally:
        # BUGFIX: the original leaked every open clip handle if
        # write_videofile raised; always release them.
        if final_clip is not None:
            final_clip.close()
        for clip in clips:
            clip.close()
    if delete_originals:
        for vf in video_files:
            try:
                vf.unlink()
            except OSError:
                # Best-effort cleanup; a locked file is not fatal.
                pass
# ==========================================
# PARTIE 4 : TTS + SOUS-TITRES KARAOKÉ
# ==========================================
async def tts_with_word_boundaries(text: str, out_audio: str) -> List[Dict]:
    """Synthesize *text* with edge-tts and capture per-word timings.

    Streams the synthesis so that both the MP3 audio chunks and the
    WordBoundary metadata events are collected in a single pass.

    Args:
        text: the voiceover script to synthesize.
        out_audio: path where the MP3 audio is written.

    Returns:
        A list of {"word": str, "start": float, "end": float} dicts with
        times in seconds from the start of the audio.
    """
    communicate = edge_tts.Communicate(text, TTS_VOICE)
    boundaries: List[Dict] = []
    audio_bytes = bytearray()
    async for chunk in communicate.stream():
        if chunk["type"] == "audio":
            # BUGFIX: the original also read chunk["offset"] here into an
            # unused variable — audio chunks are not guaranteed to carry an
            # offset, so that was both dead code and a potential KeyError.
            audio_bytes.extend(chunk["data"])
        elif chunk["type"] == "WordBoundary":
            # Offsets/durations arrive in 100-ns ticks; convert to seconds.
            start = chunk["offset"] / 10_000_000
            end = (chunk["offset"] + chunk["duration"]) / 10_000_000
            word = chunk.get("text", "").strip()
            if word:
                boundaries.append(
                    {"word": word, "start": float(start), "end": float(end)}
                )
    # Persist the assembled MP3.
    with open(out_audio, "wb") as f:
        f.write(audio_bytes)
    return boundaries
def obtenir_duree_audio(audio_file: str) -> float:
    """Return the duration of *audio_file* in seconds, or 0.0 on any failure."""
    try:
        clip = AudioFileClip(audio_file)
        length = clip.duration
        clip.close()
        return length
    except Exception as e:
        # Any problem (bad file, codec error, ...) degrades to a zero length.
        print(f"Erreur durée audio : {e}")
        return 0.0
# ==========================================
# MAIN
# ==========================================
def main():
    """End-to-end pipeline: Ollama script -> ComfyUI clips -> concat -> TTS/subtitles."""
    print("=" * 70)
    print("🚀 GÉNÉRATEUR AUTOMATIQUE DE REELS (Ollama -> ComfyUI -> MoviePy)")
    print("=" * 70)
    user_prompt = input("\n📝 Votre sujet (ex: comment être confiant ?) : ").strip()
    if not user_prompt:
        user_prompt = "comment être confiant ?"

    # 1. Ollama: voiceover script + scene prompts.
    generator = ReelsGeneratorAPI()
    reels_data = generator.generate_complete_reels(user_prompt)
    clips_prompts = reels_data["video_clips"]
    voiceover_script = reels_data["voiceover_script"]

    # Unload the LLM so ComfyUI gets the VRAM.
    generator.unload_model()
    gc.collect()
    # BUGFIX: empty_cache() raises on CPU-only torch installs; guard it.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    # BUGFIX: the model name was hard-coded as "gemma3:12b" here even though
    # the rest of the pipeline follows OLLAMA_MODEL.
    subprocess.run(["ollama", "stop", OLLAMA_MODEL], capture_output=True)

    # 2. ComfyUI: generate one clip per scene prompt, sequentially.
    print("\n🧹 Nettoyage de l'ancien dossier de sortie ComfyUI...")
    clear_comfy_output_dir(COMFY_OUTPUT_DIR)
    print(f"\n🖥️ Lancement de {len(clips_prompts)} générations vidéo sur ComfyUI...")
    for idx, clip_prompt in enumerate(clips_prompts):
        # BUGFIX: the "/8" in the progress message was hard-coded; use the
        # actual clip count.
        print(f"Génération clip {idx+1}/{len(clips_prompts)}")
        pid = send_prompt_to_comfy(f"Video clip {idx+1}: {clip_prompt}")
        wait_for_comfy_job(pid)
        time.sleep(2)
    # NOTE: the original kept an always-empty `prompt_ids` list and a dead
    # second wait loop; each job is already awaited inline above, so both
    # were removed.

    # 3. Concatenate the generated clips.
    combined_video_path = "temp_combined.mp4"
    concatenate_videos(COMFY_OUTPUT_DIR, combined_video_path, delete_originals=True)

    # 4. TTS + karaoke subtitles (creer_video_sous_titree_karaoke comes from
    # the star import of soustitreollama).
    final_output = f"REEL_FINAL_{uuid.uuid4().hex[:6]}.mp4"
    creer_video_sous_titree_karaoke(combined_video_path, voiceover_script, final_output)

    # Remove the intermediate combined video.
    if os.path.exists(combined_video_path):
        os.remove(combined_video_path)
    print("\n" + "=" * 70)
    print(f"🎉 VIDÉO TERMINÉE ET SAUVEGARDÉE : {os.path.abspath(final_output)}")
    print("=" * 70)


if __name__ == "__main__":
    main()