Merge pull request #21 from alganzory/strictness_and_performance_enhancements

v0.1.1 - Strictness and performance enhancements
alganzory authored Nov 2, 2023
2 parents 89a1753 + 58f15e0 commit 333e1b7
Showing 13 changed files with 346 additions and 104 deletions.
6 changes: 3 additions & 3 deletions README.MD
@@ -4,7 +4,7 @@ HaramBlur is a browser extension that allows you to navigate the web with respec

HaramBlur utilizes face detection and NSFW content detection and provides controls that allow you to uphold the Islamic gaze protection principle and tailor your online experience by automatically blurring images and videos that contain unwanted or impermissible content.

You can configure the type of detection you want and the amount of blur, hover to unblur, choose a specific gender to blur, or turn the extension on and off via the interactive pop-up 😄
You can configure the type of detection you want and the amount of blur, the level of strictness, hover to unblur, choose a specific gender to blur, or turn the extension on and off via the interactive pop-up 😄

![HaramBlur Demo](demos/demo1.png)

@@ -16,7 +16,7 @@ You can configure the type of detection you want and the amount of blur, hover t
- Configurable detection settings to tailor your browsing experience
- Interactive pop-up for easy on/off toggling
- Customizable hover to unblur feature
- Decent speed and accuracy (can be improved)
- Decent speed and accuracy (continuously improved)

## How it Works

@@ -26,7 +26,7 @@ HaramBlur currently used face detection and recognition features provided by [Hu
1. Clone the repository
2. run `npm install`
3. code your magic
4. run `npm run build` to build the extension, then load the extension in Chrome by going to `chrome://extensions/` and clicking on `Load unpacked` (developer mode has to be on) and selecting the project folder.
4. run `npm run build` to build the extension, then load the extension in Chrome by going to `chrome://extensions/` and clicking on `Load unpacked` (developer mode has to be on) and selecting the project folder.
5. Or run `npm run release` to generate a zip file for the extension to be uploaded to the browser/store.


Binary file modified demos/demo1.png
2 changes: 1 addition & 1 deletion manifest.json
@@ -2,7 +2,7 @@
"manifest_version": 3,
"name": "HaramBlur",
"description": "Protect your privacy and uphold Islamic values by auto detecting & blurring images and videos of unwanted or impermissible content.",
"version": "0.1.0",
"version": "0.1.1",
"permissions": ["activeTab", "storage"],
"author": "[email protected]",
"action": {
1 change: 1 addition & 0 deletions src/background.js
@@ -10,6 +10,7 @@ const defaultSettings = {
blurFemale: true,
unblurImages: true,
unblurVideos: false,
strictness: 0.3, // goes from 0 to 1
};

chrome.runtime.onInstalled.addListener(function () {
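The new `strictness` default (0.3, on a 0 to 1 scale) is what the detector thresholds below are scaled by. As a rough illustration only, a content script could read it back like this, assuming settings are persisted in `chrome.storage.sync`; the extension's actual `getSettings()` is not shown in this hunk:

```js
// Hedged sketch: read the strictness setting with the default as a fallback.
// Assumption: settings live in chrome.storage.sync under the same keys as
// defaultSettings; the real extension may use a different storage area.
const getStrictness = () =>
	new Promise((resolve) => {
		chrome.storage.sync.get({ strictness: 0.3 }, (settings) => {
			// Clamp to the documented 0..1 range before handing it to the detector.
			resolve(Math.min(1, Math.max(0, settings.strictness)));
		});
	});

getStrictness().then((strictness) => console.log("HB== strictness:", strictness));
```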
2 changes: 1 addition & 1 deletion src/content.js
@@ -25,7 +25,7 @@ getSettings()
return Promise.all([initHuman(), initNsfwModel()]);
})
.then(() => {
console.log("HB==NSFW MODEL INITIALIZED", nsfwModel);
console.log("HB== models initialized");

// wait for the dom to load
if (document.readyState === "loading") {
10 changes: 5 additions & 5 deletions src/modules/detector.js
@@ -65,25 +65,25 @@ const getNsfwClasses = (factor = 0) => {
nsfw: false,
thresh: 0.5,
},
1: {
1: {
className: "Hentai",
nsfw: true,
thresh: 0.3 + factor * 0.7, // highest it can go is 1
thresh: 0.5 + (1-factor) * 0.9, // the higher the factor, the lower the thresh, the more "strict" the filter
},
2: {
className: "Neutral",
nsfw: false,
thresh: 0.8,
thresh: 0.5 + factor * 0.5, // the higher the factor, the higher the thresh, the less "strict" the filter
},
3: {
className: "Porn",
nsfw: true,
thresh: 0.1 + factor * 0.35, // highest it can go is 0.4
thresh: 0.1 + (1-factor) * 0.9, // the higher the factor, the lower the thresh, the more "strict" the filter
},
4: {
className: "Sexy",
nsfw: true,
thresh: 0.1 + factor * 0.35, // highest it can go is 0.4
thresh: 0.1 + (1-factor) * 0.9, // the higher the factor, the lower the thresh, the more "strict" the filter
},
};
};
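To see how the strictness factor moves these cut-offs, here is a small standalone sketch that mirrors the formulas above (illustrative only; it is not part of the extension code):

```js
// Mirrors the NSFW-class thresholds from getNsfwClasses(factor).
// A higher factor (stricter) lowers the thresholds for the nsfw classes
// and raises the one for Neutral.
const nsfwThresholds = (factor) => ({
	Hentai: 0.5 + (1 - factor) * 0.9,
	Neutral: 0.5 + factor * 0.5,
	Porn: 0.1 + (1 - factor) * 0.9,
	Sexy: 0.1 + (1 - factor) * 0.9,
});

console.log(nsfwThresholds(0.3)); // default: Hentai ≈ 1.13, Neutral = 0.65, Porn/Sexy ≈ 0.73
console.log(nsfwThresholds(1)); // strictest: Hentai = 0.5, Neutral = 1, Porn/Sexy = 0.1
```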
14 changes: 12 additions & 2 deletions src/modules/helpers.js
@@ -89,13 +89,23 @@ const hasBeenProcessed = (element) => {
const processNode = (node, callBack) => {
// if the node itself is an image or video, process it
let nodes = [];
if (node.tagName === "IMG" || node.tagName === "VIDEO") {
if (node.tagName === "IMG") {
!isImageTooSmall(node) && nodes.push(node);
}
if (node.tagName === "VIDEO") {
nodes.push(node);
}

node?.querySelectorAll
? nodes.push(...node.querySelectorAll("img, video"))
: null;
nodes?.forEach(callBack);
nodes?.forEach((node) => {
return node.tagName === "VIDEO"
? callBack(node)
: !isImageTooSmall(node)
? callBack(node)
: null;
});
};

const emitEvent = (eventName, detail = "") => {
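For context, `processNode` gathers the node itself plus any `img`/`video` descendants, skipping images that fail `isImageTooSmall`, and hands each one to the callback. A typical call looks like the one in observers.js below; the logger here is just an illustrative placeholder:

```js
// Illustrative usage only: walk document.body and log every <img>/<video>
// that processNode considers worth detecting. observers.js passes the
// IntersectionObserver's observe() as the callback instead.
processNode(document.body, (mediaEl) => {
	console.log("HB== candidate:", mediaEl.tagName, mediaEl.currentSrc || mediaEl.src);
});
```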
126 changes: 99 additions & 27 deletions src/modules/observers.js
@@ -2,60 +2,132 @@
// This module exports the intersection observer and mutation observer functions

import { runDetection } from "./processing.js"; // import the runDetection function from processing.js
import { listenToEvent, processNode } from "./helpers.js";
import {
shouldDetect,
shouldDetectImages,
shouldDetectVideos,
} from "./settings.js";
import { emitEvent, listenToEvent, processNode } from "./helpers.js";
import { shouldDetect } from "./settings.js";
import { applyBlurryStartMode } from "./style.js";

const BATCH_SIZE = 20; //TODO: make this a setting/calculated based on the device's performance

let intersectionObserver;
let mutationObserver;
let highPriorityQueue = new Set();
let lowPriorityQueue = new Set();
let observationStarted = false;

const processNextImage = async () => {
let batch = [];

// Fill the batch with high-priority images first
while (batch.length < BATCH_SIZE && highPriorityQueue.size > 0) {
const nextImage = highPriorityQueue.entries().next()?.value?.[0];
nextImage.dataset.processed
? null
: batch.push(
runDetection(nextImage).then(() => {
// cancel the requestIdleCallback so it doesn't run after the image has been processed
if (nextImage.dataset.ribId) {
cancelIdleCallback(nextImage.dataset.ribId);

// remove the id from the dataset
delete nextImage.dataset.ribId;
}
})
);
highPriorityQueue.delete(nextImage);
}

// If there's still room in the batch, fill the rest with low-priority images
while (batch.length < BATCH_SIZE && lowPriorityQueue.size > 0) {
const nextImage = lowPriorityQueue.entries().next()?.value?.[0];

// push a promise that runs the runDetection function through requestIdleCallback, we also wanna store
// the id of the requestIdleCallback in the image object so we can cancel it if the image is moved to the
// high-priority queue
batch.push(
new Promise((resolve) => {
const id = requestIdleCallback(() => {
runDetection(nextImage).then(() => {
// remove the id from the dataset
delete nextImage.dataset.ribId;

resolve();
});
});
nextImage.dataset.ribId = id;
})
);

lowPriorityQueue.delete(nextImage);
}

if (batch.length > 0) {
await Promise.allSettled(batch);

if (lowPriorityQueue.size > 0 || highPriorityQueue.size > 0)
processNextImage(); // Call processNextImage again after all images in the batch have been processed
}
};

const increasePriority = (node) => {
lowPriorityQueue.delete(node);
highPriorityQueue.add(node);
};

const decreasePriority = (node) => {
highPriorityQueue.delete(node);
lowPriorityQueue.add(node);
};

const startObservation = () => {
if (observationStarted) return;
observationStarted = true;
emitEvent("observationStarted");
};

const initIntersectionObserver = async () => {
intersectionObserver = new IntersectionObserver(
(entries) => {
const visibleEntries = entries.filter(
(entry) => entry.isIntersecting
);
const visiblePromises = visibleEntries.map(async (entry) => {
const imgOrVideo = entry.target;
intersectionObserver.unobserve(imgOrVideo);
return runDetection(imgOrVideo);
entries.forEach((entry) => {
const node = entry.target;
const changePriority = entry.isIntersecting
? increasePriority
: decreasePriority;

changePriority(node);
});
Promise.allSettled(visiblePromises);
},
{ rootMargin: "100px", threshold: 0 }
);

// use querySelectorAll to get all images and videos
const images = shouldDetectImages ? document.querySelectorAll("img") : [];
const videos = shouldDetectVideos ? document.querySelectorAll("video") : [];
for (let img of images) {
intersectionObserver.observe(img);
}
for (let video of videos) {
intersectionObserver.observe(video);
}
};

const initMutationObserver = async () => {
mutationObserver = new MutationObserver((mutations) => {
mutations.forEach((mutation) => {
if (mutation.type === "childList") {
mutation.addedNodes.forEach((node) => {
processNode(node, (node) =>
intersectionObserver.observe(node)
);
processNode(node, (node) => {
startObservation();
applyBlurryStartMode(node);
return intersectionObserver.observe(node);
});
});
}
});

shouldDetect() && processNextImage();
});

mutationObserver.observe(document.body, {
childList: true,
subtree: true,
});

// process all images and videos that are already in the DOM
processNode(document.body, (node) => {
startObservation();
applyBlurryStartMode(node);
return intersectionObserver.observe(node);
});
};

const attachObserversListener = () => {
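The heart of the performance change is the two-queue batching in `processNextImage` above: media intersecting the viewport is detected right away, while off-screen media is deferred with `requestIdleCallback` and cancelled with `cancelIdleCallback` if it scrolls into view before the idle callback fires. A stripped-down sketch of that defer-and-cancel pattern, separate from the extension's own queues:

```js
// Hedged sketch of the defer/cancel idea used by processNextImage(), not the
// extension's actual implementation. Off-screen elements are scheduled for
// idle time; if one becomes visible first, its pending idle work is cancelled
// and the detection runs eagerly instead.
const pendingIdleIds = new WeakMap();

const deferDetection = (el, detect) => {
	const id = requestIdleCallback(() => {
		pendingIdleIds.delete(el);
		detect(el);
	});
	pendingIdleIds.set(el, id);
};

const promoteDetection = (el, detect) => {
	const id = pendingIdleIds.get(el);
	if (id !== undefined) {
		cancelIdleCallback(id); // avoid running the same detection twice
		pendingIdleIds.delete(el);
	}
	detect(el);
};
```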