Add macOS/Windows setup scripts and update modules for enhanced funct… #1323
@@ -0,0 +1,20 @@
@echo off
REM clone_or_update_deep_live_cam.bat - Clone or update Deep-Live-Cam repo in a separate folder and sync to local working folder
SET REPO_URL=https://github.com/hacksider/Deep-Live-Cam.git
SET TARGET_DIR=Deep-Live-Cam-remote
SET LOCAL_DIR=Deep-Live-Cam

IF EXIST %TARGET_DIR% (
    echo Updating existing repo in %TARGET_DIR% ...
    cd %TARGET_DIR%
    git pull
    cd ..
) ELSE (
    echo Cloning repo to %TARGET_DIR% ...
    git clone %REPO_URL% %TARGET_DIR%
)

REM Sync updated code to local working folder (excluding .git and models)
REM /I makes xcopy treat a missing destination as a directory instead of prompting
xcopy %TARGET_DIR% %LOCAL_DIR% /E /H /Y /I /EXCLUDE:exclude.txt

echo Done. Latest code is in %LOCAL_DIR%.
@@ -0,0 +1,20 @@
#!/bin/zsh
# clone_or_update_deep_live_cam.sh - Clone or update Deep-Live-Cam repo in a separate folder (macOS/Linux)
REPO_URL="https://github.com/hacksider/Deep-Live-Cam.git"
TARGET_DIR="Deep-Live-Cam-remote"

if [ -d "$TARGET_DIR" ]; then
    echo "Updating existing repo in $TARGET_DIR ..."
    cd "$TARGET_DIR"
    git pull
    cd ..
else
    echo "Cloning repo to $TARGET_DIR ..."
    git clone "$REPO_URL" "$TARGET_DIR"
fi

# Sync updated code to local working folder (excluding .git and models)
LOCAL_DIR="Deep-Live-Cam"
rsync -av --exclude='.git' --exclude='models' --exclude='*.pth' --exclude='*.onnx' "$TARGET_DIR"/ "$LOCAL_DIR"/

echo "Done. Latest code is in $LOCAL_DIR."
@@ -0,0 +1,4 @@
.git
models
.pth
.onnx
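For reference, xcopy's /EXCLUDE file treats each line as a plain substring of a file's full path and does not support wildcards, so entries such as .pth and .onnx exclude any path containing that text; the shell script expresses the same filtering directly through its rsync --exclude patterns.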
@@ -0,0 +1,44 @@
#!/bin/bash
# Deep-Live-Cam macOS Automated Setup
set -e

# 1. Ensure Homebrew is installed
if ! command -v brew &> /dev/null; then
    echo "Homebrew not found. Please install Homebrew first: https://brew.sh/"
    exit 1
fi
# 2. Install Python 3.10 and tkinter
brew install python@3.10 python-tk@3.10

# 3. Create and activate virtual environment
PYTHON_BIN=$(brew --prefix python@3.10)/bin/python3.10
$PYTHON_BIN -m venv venv
source venv/bin/activate

# 4. Upgrade pip and install dependencies
pip install --upgrade pip
pip install -r requirements.txt

# 5. Download models if not present
mkdir -p models
if [ ! -f models/GFPGANv1.4.pth ]; then
    curl -L -o models/GFPGANv1.4.pth "https://huggingface.co/hacksider/deep-live-cam/resolve/main/GFPGANv1.4.pth"
fi
if [ ! -f models/inswapper_128_fp16.onnx ]; then
    curl -L -o models/inswapper_128_fp16.onnx "https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128_fp16.onnx"
fi

# 6. Run instructions for user
echo "\nSetup complete!" | ||
echo "To activate your environment and run Deep-Live-Cam, use one of the following commands:" | ||
echo "" | ||
echo "# For CUDA (Nvidia GPU, if supported):" | ||
echo "source venv/bin/activate && python run.py --execution-provider cuda" | ||
echo "" | ||
echo "# For Apple Silicon (M1/M2/M3) CoreML:" | ||
echo "source venv/bin/activate && python3.10 run.py --execution-provider coreml" | ||
echo "" | ||
echo "# For CPU only:" | ||
echo "source venv/bin/activate && python run.py" |
@@ -0,0 +1,36 @@
@echo off
REM Deep-Live-Cam Windows Automated Setup

REM 1. Create virtual environment
python -m venv venv
if errorlevel 1 (
    echo Failed to create virtual environment. Ensure Python 3.10+ is installed and in PATH.
    exit /b 1
)

REM 2. Activate virtual environment
call venv\Scripts\activate
if errorlevel 1 (
    echo Failed to activate virtual environment.
    exit /b 1
)

REM 3. Install dependencies
pip install --upgrade pip
pip install -r requirements.txt
if errorlevel 1 (
    echo Failed to install dependencies.
    exit /b 1
)
REM 4. Download models (manual step if not present)
REM Ensure the models folder exists before downloading
if not exist models mkdir models
echo Downloading models (if not already in models/)...
if not exist models\GFPGANv1.4.pth (
    powershell -Command "Invoke-WebRequest -Uri https://huggingface.co/hacksider/deep-live-cam/resolve/main/GFPGANv1.4.pth -OutFile models\GFPGANv1.4.pth"
)
if not exist models\inswapper_128_fp16.onnx (
    powershell -Command "Invoke-WebRequest -Uri https://huggingface.co/hacksider/deep-live-cam/resolve/main/inswapper_128_fp16.onnx -OutFile models\inswapper_128_fp16.onnx"
)

REM 5. Run the app
python run.py
@@ -4,29 +4,35 @@
 def get_video_frame(video_path: str, frame_number: int = 0) -> Any:
+    """Extract a specific frame from a video file, with color correction if enabled."""
     capture = cv2.VideoCapture(video_path)
-
-    # Set MJPEG format to ensure correct color space handling
-    capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
-
-    # Only force RGB conversion if color correction is enabled
-    if modules.globals.color_correction:
-        capture.set(cv2.CAP_PROP_CONVERT_RGB, 1)
-
-    frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
-    capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
-    has_frame, frame = capture.read()
-
-    if has_frame and modules.globals.color_correction:
-        # Convert the frame color if necessary
-        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-    capture.release()
-    return frame if has_frame else None
+    try:
+        # Set MJPEG format to ensure correct color space handling
+        capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
+        # Only force RGB conversion if color correction is enabled
+        if modules.globals.color_correction:
+            capture.set(cv2.CAP_PROP_CONVERT_RGB, 1)
+        frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
+        capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
+        has_frame, frame = capture.read()
+        if has_frame and modules.globals.color_correction:
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        return frame if has_frame else None
+    except Exception as e:
+        print(f"Error extracting video frame: {e}")
+        return None
+    finally:
+        capture.release()


 def get_video_frame_total(video_path: str) -> int:
+    """Return the total number of frames in a video file."""
     capture = cv2.VideoCapture(video_path)
-    video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
-    capture.release()
-    return video_frame_total
+    try:
+        video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
+        return video_frame_total
+    except Exception as e:
+        print(f"Error getting video frame total: {e}")
+        return 0
+    finally:
+        capture.release()

Review comment on lines +32 to +33 (suggestion, code-quality): Inline variable that is immediately returned; see the sketch below.
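A minimal sketch of what that inline-variable cleanup could look like, assuming the module's existing cv2 import; this is an illustration of the suggestion, not the reviewer's exact patch:

def get_video_frame_total(video_path: str) -> int:
    """Return the total number of frames in a video file."""
    capture = cv2.VideoCapture(video_path)
    try:
        # Return the frame count directly instead of binding it to a
        # temporary variable that is immediately returned.
        return int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    except Exception as e:
        print(f"Error getting video frame total: {e}")
        return 0
    finally:
        capture.release()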
@@ -1,32 +1,42 @@
 import numpy as np
 from sklearn.cluster import KMeans
 from sklearn.metrics import silhouette_score
-from typing import Any
+from typing import Any, List, Tuple


-def find_cluster_centroids(embeddings, max_k=10) -> Any:
+def find_cluster_centroids(embeddings: List[Any], max_k: int = 10) -> Any:
Review comment on the updated signature (issue): Handle empty embeddings list in find_cluster_centroids. Add a check to return an empty list if the embeddings list is empty; a sketch follows this hunk.
"""Find optimal cluster centroids for a set of embeddings using KMeans.""" | ||
inertia = [] | ||
cluster_centroids = [] | ||
K = range(1, max_k+1) | ||
|
||
for k in K: | ||
kmeans = KMeans(n_clusters=k, random_state=0) | ||
kmeans.fit(embeddings) | ||
inertia.append(kmeans.inertia_) | ||
cluster_centroids.append({"k": k, "centroids": kmeans.cluster_centers_}) | ||
try: | ||
kmeans = KMeans(n_clusters=k, random_state=0) | ||
kmeans.fit(embeddings) | ||
inertia.append(kmeans.inertia_) | ||
cluster_centroids.append({"k": k, "centroids": kmeans.cluster_centers_}) | ||
except Exception as e: | ||
print(f"KMeans failed for k={k}: {e}") | ||
|
||
if len(inertia) < 2: | ||
return cluster_centroids[0]['centroids'] if cluster_centroids else [] | ||
|
||
diffs = [inertia[i] - inertia[i+1] for i in range(len(inertia)-1)] | ||
optimal_centroids = cluster_centroids[diffs.index(max(diffs)) + 1]['centroids'] | ||
|
||
return optimal_centroids | ||


-def find_closest_centroid(centroids: list, normed_face_embedding) -> list:
+def find_closest_centroid(centroids: List[Any], normed_face_embedding: Any) -> Tuple[int, Any]:
+    """Find the index and value of the centroid closest to the given embedding."""
     try:
         centroids = np.array(centroids)
         normed_face_embedding = np.array(normed_face_embedding)
         similarities = np.dot(centroids, normed_face_embedding)
         closest_centroid_index = np.argmax(similarities)

         return closest_centroid_index, centroids[closest_centroid_index]
-    except ValueError:
-        return None
+    except Exception as e:
+        print(f"Error in find_closest_centroid: {e}")
+        return -1, None
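Picking up the review note on find_cluster_centroids above, a minimal sketch of one possible empty-input guard; the exact check the reviewer had in mind is an assumption here:

def find_cluster_centroids(embeddings: List[Any], max_k: int = 10) -> Any:
    """Find optimal cluster centroids for a set of embeddings using KMeans."""
    # Assumed guard: with no embeddings there is nothing to cluster, so
    # return an empty list before KMeans is ever called.
    if embeddings is None or len(embeddings) == 0:
        return []
    # ... remainder of the function unchanged from the diff above ...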
Review comment (issue, code-quality): Extract code out into function (extract-method); a sketch follows below.
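As a rough illustration of that extract-method suggestion (one possible split, assuming the module's existing imports; the helper name _compute_closest_centroid is hypothetical):

def _compute_closest_centroid(centroids: List[Any], normed_face_embedding: Any) -> Tuple[int, Any]:
    # Hypothetical helper holding the pure computation, free of error handling.
    centroids = np.array(centroids)
    normed_face_embedding = np.array(normed_face_embedding)
    similarities = np.dot(centroids, normed_face_embedding)
    closest_centroid_index = int(np.argmax(similarities))
    return closest_centroid_index, centroids[closest_centroid_index]


def find_closest_centroid(centroids: List[Any], normed_face_embedding: Any) -> Tuple[int, Any]:
    """Find the index and value of the centroid closest to the given embedding."""
    try:
        return _compute_closest_centroid(centroids, normed_face_embedding)
    except Exception as e:
        print(f"Error in find_closest_centroid: {e}")
        return -1, None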