Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
52 commits
Select commit Hold shift + click to select a range
8b2c12b
Fixed navigation to home
tushar1977 Sep 23, 2025
0129f7c
Merge branch 'AOSSIE-Org:main' into main
tushar1977 Sep 23, 2025
52a8a57
Removed console logs
tushar1977 Sep 23, 2025
7b73b34
Testing webcam permissions
tushar1977 Sep 25, 2025
4fc47c7
testing for permissions
tushar1977 Sep 25, 2025
5c05baf
Testing webcam capture on windows
tushar1977 Sep 26, 2025
1e4b900
Added dialog box for those device that dont support webcam
tushar1977 Oct 5, 2025
5d06b19
Merge branch 'AOSSIE-Org:main' into main
tushar1977 Oct 5, 2025
b9a36dd
Revert "Removed console logs"
tushar1977 Oct 5, 2025
2c037c8
Reverted some changes to 7b73b34 commit and fixed the closing of dial…
tushar1977 Oct 13, 2025
79c9b67
testing on windows
tushar1977 Oct 14, 2025
f655d50
Merge branch 'AOSSIE-Org:main' into test
tushar1977 Oct 14, 2025
994b943
Implemented Base64 to image route
tushar1977 Oct 16, 2025
ed5168b
Added routes in frontend
tushar1977 Oct 16, 2025
b083616
Merge test branch
tushar1977 Oct 16, 2025
4fba2f2
Fixed mutate function to fetch images from fetchSearchedFacesBase64
tushar1977 Oct 16, 2025
ec95615
Merge branch 'main' of https://github.com/tushar1977/PictoPy
tushar1977 Oct 16, 2025
14f55fc
Fixed bugs in backend
tushar1977 Oct 16, 2025
efe79db
Fixed frontend
tushar1977 Oct 16, 2025
fd18420
Fixed linting
tushar1977 Oct 16, 2025
dbc0820
Removed redundant import
tushar1977 Oct 16, 2025
5b216a6
Reverted main.rs file
tushar1977 Oct 16, 2025
1f7113d
Reverted files
tushar1977 Oct 16, 2025
eb99b0e
Fixed bugs
tushar1977 Oct 16, 2025
7c8bef7
Fixed grammatical mistake
tushar1977 Oct 16, 2025
7297b59
Fixed critical and major bugs
tushar1977 Oct 16, 2025
a12a7c9
Fixed tauri config file
tushar1977 Oct 16, 2025
340b1e9
Fixed package json
tushar1977 Oct 16, 2025
aed41b4
Reformated using black
tushar1977 Oct 16, 2025
aa01490
Fixed webcam bugs
tushar1977 Oct 16, 2025
8967c03
Fixed cleanup
tushar1977 Oct 16, 2025
e74e37e
Fixed default image to appear when searching
tushar1977 Oct 19, 2025
06f3c53
Fixed thumbnail image and webcam onclose
tushar1977 Oct 19, 2025
66b22a5
Fixed merge conflicts
tushar1977 Oct 19, 2025
7c3eeb0
Fixed linting
tushar1977 Oct 19, 2025
f7c4a97
Removed duplicate photo.jpeg
tushar1977 Oct 19, 2025
ac41b8b
Fixing major bugs
tushar1977 Oct 19, 2025
8a1a930
Merge branch 'main' into main
tushar1977 Oct 19, 2025
8d7a082
Implemented plist file for macOs
tushar1977 Oct 22, 2025
e39321e
Fixed plist file
tushar1977 Oct 22, 2025
6fbb39d
Fix plist
tushar1977 Oct 22, 2025
d1c9131
Fixed plist file for macOs
tushar1977 Oct 22, 2025
acadd54
Added info.plist
tushar1977 Oct 22, 2025
5372fbe
Refactored backend code
tushar1977 Oct 23, 2025
19d11d3
Fixed bugs
tushar1977 Oct 23, 2025
2dfa314
Linted files
tushar1977 Oct 23, 2025
73278fc
Merge branch 'main' into main
tushar1977 Oct 23, 2025
df38c90
Fixed to have limit on base64
tushar1977 Oct 23, 2025
58fcafc
Merge branch 'AOSSIE-Org:main' into main
tushar1977 Oct 28, 2025
d45a4b1
Enhance mutation hooks to include mutationContext in success, error, …
rahulharpal1603 Oct 28, 2025
501552b
Merge branch 'main' into main
tushar1977 Oct 29, 2025
0d56ea4
Fixed version
tushar1977 Oct 29, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
179 changes: 88 additions & 91 deletions backend/app/routes/face_clusters.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,16 @@
import logging
from binascii import Error as Base64Error
import base64
from typing import Annotated
import uuid
import os
from typing import Optional, List, Dict, Any
from pydantic import BaseModel
from app.config.settings import CONFIDENCE_PERCENT, DEFAULT_FACENET_MODEL
from fastapi import APIRouter, HTTPException, status
from fastapi import APIRouter, HTTPException, Query, status
from app.database.face_clusters import (
db_get_cluster_by_id,
db_update_cluster,
db_get_all_clusters_with_face_counts,
db_get_images_by_cluster_id, # Add this import
)
from app.database.faces import get_all_face_embeddings
from app.models.FaceDetector import FaceDetector
from app.models.FaceNet import FaceNet
from app.schemas.face_clusters import (
RenameClusterRequest,
RenameClusterResponse,
Expand All @@ -26,32 +23,8 @@
GetClusterImagesData,
ImageInCluster,
)
from app.schemas.images import AddSingleImageRequest
from app.utils.FaceNet import FaceNet_util_cosine_similarity


class BoundingBox(BaseModel):
x: float
y: float
width: float
height: float


class ImageData(BaseModel):
id: str
path: str
folder_id: str
thumbnailPath: str
metadata: Dict[str, Any]
isTagged: bool
tags: Optional[List[str]] = None
bboxes: BoundingBox


class GetAllImagesResponse(BaseModel):
success: bool
message: str
data: List[ImageData]
from app.schemas.images import FaceSearchRequest, InputType
from app.utils.faceSearch import perform_face_search


logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -236,67 +209,91 @@ def get_cluster_images(cluster_id: str):
"/face-search",
responses={code: {"model": ErrorResponse} for code in [400, 500]},
)
def face_tagging(payload: AddSingleImageRequest):
image_path = payload.path
if not os.path.isfile(image_path):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorResponse(
success=False,
error="Invalid file path",
message="The provided path is not a valid file",
).model_dump(),
)

fd = FaceDetector()
fn = FaceNet(DEFAULT_FACENET_MODEL)
try:
matches = []
image_id = str(uuid.uuid4())
result = fd.detect_faces(image_id, image_path, forSearch=True)
if not result or result["num_faces"] == 0:
return GetAllImagesResponse(
success=True,
message=f"Successfully retrieved {len(matches)} images",
data=[],
def face_tagging(
payload: FaceSearchRequest,
input_type: Annotated[
InputType, Query(description="Choose input type: 'path' or 'base64'")
] = InputType.path,
):
image_path = None

if input_type == InputType.path:
local_file_path = payload.path

if not local_file_path:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorResponse(
success=False,
error="No Image path provided ",
message="image path is required.",
).model_dump(),
)
if not os.path.isfile(local_file_path):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorResponse(
success=False,
error="Invalid file path",
message="The provided path is not a valid file",
).model_dump(),
)
image_path = payload.path

process_face = result["processed_faces"][0]
new_embedding = fn.get_embedding(process_face)

images = get_all_face_embeddings()
if len(images) == 0:
return GetAllImagesResponse(
success=True,
message=f"Successfully retrieved {len(matches)} images",
data=[],
elif input_type == InputType.base64:
base64_data = payload.base64_data
if not base64_data:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorResponse(
success=False,
error="No base64 data",
message="Base64 image data is required.",
).model_dump(),
)
else:
for image in images:
max_similarity = 0
similarity = FaceNet_util_cosine_similarity(
new_embedding, image["embeddings"]
)
max_similarity = max(max_similarity, similarity)
if max_similarity >= CONFIDENCE_PERCENT:
matches.append(
ImageData(
id=image["id"],
path=image["path"],
folder_id=image["folder_id"],
thumbnailPath=image["thumbnailPath"],
metadata=image["metadata"],
isTagged=image["isTagged"],
tags=image["tags"],
bboxes=image["bbox"],
)
)

return GetAllImagesResponse(
success=True,
message=f"Successfully retrieved {len(matches)} images",
data=matches,
MAX_B64_LEN = 14_000_000 # 10MB
if len(base64_data) > MAX_B64_LEN:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorResponse(
success=False,
error="Payload too large",
message="Base64 image exceeds maximum allowed size.",
).model_dump(),
)
try:
image_bytes = base64.b64decode(base64_data.split(",")[-1])
except (Base64Error, ValueError):
Comment on lines +255 to +267
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Decode first, enforce byte-size, and use a secure temp file.
Current length check is on the base64 string (imprecise), and writes to a predictable path. Decode, validate bytes, respond with 413, and use tempfile.NamedTemporaryFile.

-        MAX_B64_LEN = 14_000_000  # 10MB
-        if len(base64_data) > MAX_B64_LEN:
-            raise HTTPException(
-                status_code=status.HTTP_400_BAD_REQUEST,
-                detail=ErrorResponse(
-                    success=False,
-                    error="Payload too large",
-                    message="Base64 image exceeds maximum allowed size.",
-                ).model_dump(),
-            )
-        try:
-            image_bytes = base64.b64decode(base64_data.split(",")[-1])
+        try:
+            # Decode first; then validate exact byte size.
+            image_bytes = base64.b64decode(base64_data.split(",")[-1])
         except (Base64Error, ValueError):
             raise HTTPException(
                 status_code=status.HTTP_400_BAD_REQUEST,
                 detail=ErrorResponse(
                     success=False,
                     error="Invalid base64 data",
                     message="The provided base64 image data is malformed or invalid.",
                 ).model_dump(),
             )
 
+        MAX_IMAGE_BYTES = 10 * 1024 * 1024  # 10 MiB
+        if len(image_bytes) > MAX_IMAGE_BYTES:
+            raise HTTPException(
+                status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
+                detail=ErrorResponse(
+                    success=False,
+                    error="Payload too large",
+                    message="Image exceeds maximum allowed size (10 MiB).",
+                ).model_dump(),
+            )
@@
-        image_id = str(uuid.uuid4())[:8]
-        temp_dir = "temp_uploads"
-        os.makedirs(temp_dir, exist_ok=True)
-        local_image_path = os.path.join(temp_dir, f"{image_id}.{extension}")
-
-        with open(local_image_path, "wb") as f:
-            f.write(image_bytes)
+        import tempfile
+        with tempfile.NamedTemporaryFile(
+            prefix="pictopy_", suffix=f".{extension}", delete=False
+        ) as tmp:
+            tmp.write(image_bytes)
+            local_image_path = tmp.name

Also applies to: 277-294

🤖 Prompt for AI Agents
In backend/app/routes/face_clusters.py around lines 255-267 (also apply same
changes to 277-294): the current logic checks the length of the base64 string
and writes to a predictable path; instead decode the base64 first, then enforce
a byte-size limit (respond with HTTP 413 Payload Too Large), and only after
validating size write to a secure temporary file using
tempfile.NamedTemporaryFile (delete=False if needed) to avoid predictable paths;
also catch decoding errors and return a 400 with a clear error payload as
before.

raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorResponse(
success=False,
error="Invalid base64 data",
message="The provided base64 image data is malformed or invalid.",
).model_dump(),
)

format_match = (
base64_data.split(";")[0].split("/")[-1] if ";" in base64_data else "jpeg"
)
extension = (
format_match
if format_match in ["jpeg", "jpg", "png", "gif", "webp"]
else "jpeg"
)
image_id = str(uuid.uuid4())[:8]
temp_dir = "temp_uploads"
os.makedirs(temp_dir, exist_ok=True)
local_image_path = os.path.join(temp_dir, f"{image_id}.{extension}")

with open(local_image_path, "wb") as f:
f.write(image_bytes)

image_path = local_image_path

try:
return perform_face_search(image_path)
finally:
fd.close()
fn.close()
if input_type == InputType.base64 and image_path and os.path.exists(image_path):
os.remove(image_path)
12 changes: 9 additions & 3 deletions backend/app/schemas/images.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,16 @@
from enum import Enum
from typing import List, Optional, Union

from pydantic import BaseModel, model_validator


# Request Model
class AddSingleImageRequest(BaseModel):
path: str
class InputType(str, Enum):
    """Selector for how the face-search image is supplied to the route."""

    path = "path"
    base64 = "base64"


class FaceSearchRequest(BaseModel):
    """Request body for the face-search endpoint.

    At least one of the two source fields must be populated; the route's
    ``input_type`` query parameter selects which one is actually read.
    """

    path: Optional[str] = None
    base64_data: Optional[str] = None

    @model_validator(mode="after")
    def _require_one_source(self):
        # Reject requests carrying neither source up front; otherwise the
        # route only discovers the problem downstream with a vaguer error.
        if not self.path and not self.base64_data:
            raise ValueError("Either 'path' or 'base64_data' must be provided")
        return self
Comment on lines +11 to +13
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Add validation to ensure at least one input field is provided.

Both path and base64_data are optional with no constraint enforcement. The API could receive requests with both fields set to None, leading to errors downstream.

Apply this diff to add validation using Pydantic's model validator:

+from pydantic import model_validator
+
 class FaceSearchRequest(BaseModel):
     path: Optional[str] = None
     base64_data: Optional[str] = None
+
+    @model_validator(mode='after')
+    def check_at_least_one_field(self):
+        if not self.path and not self.base64_data:
+            raise ValueError("Either 'path' or 'base64_data' must be provided")
+        if self.path and self.base64_data:
+            raise ValueError("Only one of 'path' or 'base64_data' should be provided")
+        return self
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
class FaceSearchRequest(BaseModel):
path: Optional[str] = None
base64_data: Optional[str] = None
from pydantic import model_validator
class FaceSearchRequest(BaseModel):
path: Optional[str] = None
base64_data: Optional[str] = None
@model_validator(mode='after')
def check_at_least_one_field(self):
if not self.path and not self.base64_data:
raise ValueError("Either 'path' or 'base64_data' must be provided")
if self.path and self.base64_data:
raise ValueError("Only one of 'path' or 'base64_data' should be provided")
return self
🤖 Prompt for AI Agents
In backend/app/schemas/images.py around lines 11 to 13, the FaceSearchRequest
model declares path and base64_data as Optional but does not validate that at
least one is provided; add a Pydantic root validator (or @root_validator) to
check that either path or base64_data is not None/empty and raise a ValueError
with a clear message if both are missing/empty so the API rejects invalid
requests early.



class AddMultipleImagesRequest(BaseModel):
Expand Down
106 changes: 106 additions & 0 deletions backend/app/utils/faceSearch.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
import uuid
from typing import Optional, List, Dict, Any
from pydantic import BaseModel
from app.config.settings import CONFIDENCE_PERCENT, DEFAULT_FACENET_MODEL
from app.database.faces import get_all_face_embeddings
from app.models.FaceDetector import FaceDetector
from app.models.FaceNet import FaceNet
from app.utils.FaceNet import FaceNet_util_cosine_similarity


class BoundingBox(BaseModel):
    """Axis-aligned face bounding box; origin and units are whatever the
    detector emitted (pixels vs. normalized not determinable here — confirm
    against FaceDetector output)."""

    x: float  # left coordinate of the box
    y: float  # top coordinate of the box
    width: float
    height: float


class ImageData(BaseModel):
    """One matched image row returned by the face-similarity search.

    Field values are copied verbatim from the rows returned by
    get_all_face_embeddings().
    """

    id: str  # image identifier from the database row
    path: str  # absolute/relative file path of the image
    folder_id: str
    thumbnailPath: str  # camelCase preserved: part of the API wire format
    metadata: Dict[str, Any]
    isTagged: bool
    tags: Optional[List[str]] = None
    # NOTE(review): declared as a single BoundingBox but populated from
    # image["bbox"]; if the DB stores a list of boxes per image this should
    # be List[BoundingBox] — confirm against the faces table schema.
    bboxes: BoundingBox


class GetAllImagesResponse(BaseModel):
    """Envelope for face-search results: status flag, human-readable
    message, and the list of matched images (empty on no matches)."""

    success: bool
    message: str
    data: List[ImageData]

Comment on lines +11 to +33
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Fix schema mismatch and avoid duplicate models.
Local ImageData defines bboxes: BoundingBox (singular) while DB returns a list; also duplicates schemas already in app.schemas.images.

-from pydantic import BaseModel
-from app.config.settings import CONFIDENCE_PERCENT, DEFAULT_FACENET_MODEL
+from app.config.settings import CONFIDENCE_PERCENT, DEFAULT_FACENET_MODEL
+from app.schemas.images import ImageData, GetAllImagesResponse
@@
-class BoundingBox(BaseModel):
-    x: float
-    y: float
-    width: float
-    height: float
-
-
-class ImageData(BaseModel):
-    id: str
-    path: str
-    folder_id: str
-    thumbnailPath: str
-    metadata: Dict[str, Any]
-    isTagged: bool
-    tags: Optional[List[str]] = None
-    bboxes: BoundingBox
-
-
-class GetAllImagesResponse(BaseModel):
-    success: bool
-    message: str
-    data: List[ImageData]
+# Reuse shared schemas to prevent drift with OpenAPI.
@@
                 matches.append(
                     ImageData(
                         id=image["id"],
                         path=image["path"],
                         folder_id=image["folder_id"],
                         thumbnailPath=image["thumbnailPath"],
                         metadata=image["metadata"],
                         isTagged=image["isTagged"],
                         tags=image["tags"],
-                        bboxes=image["bbox"],
+                        # Note: bbox/bboxes omitted here to match ImageData schema.
                     )
                 )

Follow-up: If bbox must be returned, extend the shared ImageData schema and OpenAPI accordingly instead of redefining models locally. Based on learnings

Also applies to: 18-27, 29-33, 84-93

🤖 Prompt for AI Agents
backend/app/utils/faceSearch.py lines ~11-33 (and also affecting ranges 18-27,
29-33, 84-93): the local ImageData and BoundingBox schemas are incorrect and
duplicate definitions that already exist in app.schemas.images; change bboxes
from a single BoundingBox to a List[BoundingBox], remove the duplicated model
definitions, and import the shared BoundingBox and ImageData schemas from
app.schemas.images instead; if you need to include thumbnailPath, metadata or
isTagged fields not present in the shared schema, extend the imported ImageData
(or create a subclass) so the OpenAPI schema reflects the additional fields, and
update GetAllImagesResponse to use the shared/extended ImageData type. Ensure
typing and imports are adjusted and that the OpenAPI exposure uses the
shared/extended model rather than locally redefining it.


def perform_face_search(image_path: str) -> GetAllImagesResponse:
    """
    Performs face detection, embedding generation, and similarity search.

    Args:
        image_path (str): Path to the image file to process.

    Returns:
        GetAllImagesResponse: Search result containing matched images.
        ``success`` is False only when face detection itself fails; an
        empty match list is still reported as success.
    """
    fd = FaceDetector()
    fn = None
    try:
        # Construct FaceNet inside the try block so that `fd` is released
        # even if model loading raises (the original leaked `fd` here).
        fn = FaceNet(DEFAULT_FACENET_MODEL)

        # Throwaway id: detect_faces requires one, but with forSearch=True
        # nothing is persisted for this image.
        image_id = str(uuid.uuid4())
        try:
            result = fd.detect_faces(image_id, image_path, forSearch=True)
        except Exception as e:
            # Report detection failure in-band so the route still returns a
            # well-formed response body instead of a 500.
            return GetAllImagesResponse(
                success=False,
                message=f"Failed to process image: {str(e)}",
                data=[],
            )

        if not result or result["num_faces"] == 0:
            return GetAllImagesResponse(
                success=True,
                message="No faces detected in the image.",
                data=[],
            )

        # Only the first detected face is used as the query embedding.
        new_embedding = fn.get_embedding(result["processed_faces"][0])

        images = get_all_face_embeddings()
        if not images:
            return GetAllImagesResponse(
                success=True,
                message="No face embeddings available for comparison.",
                data=[],
            )

        # Keep every stored image whose embedding is similar enough to the
        # query face.
        matches = [
            ImageData(
                id=image["id"],
                path=image["path"],
                folder_id=image["folder_id"],
                thumbnailPath=image["thumbnailPath"],
                metadata=image["metadata"],
                isTagged=image["isTagged"],
                tags=image["tags"],
                bboxes=image["bbox"],
            )
            for image in images
            if FaceNet_util_cosine_similarity(new_embedding, image["embeddings"])
            >= CONFIDENCE_PERCENT
        ]

        return GetAllImagesResponse(
            success=True,
            message=f"Successfully retrieved {len(matches)} matching images.",
            data=matches,
        )
    finally:
        # fd is always constructed by the time we get here; fn may still be
        # None if its constructor raised. The original `"fd" in locals()`
        # guards were dead code.
        fd.close()
        if fn is not None:
            fn.close()
Loading
Loading