- Create dedicated ProfanityFilter class with an isolated SQLite database
- Separate profanity.db from the main application database to prevent SQLITE_MISUSE errors
- Add comprehensive custom word management (CRUD operations)
- Implement advanced profanity detection with leetspeak and pattern matching
- Add admin UI for managing custom profanity words
- Add extensive test suites for both the profanity filter and the API routes
- Update server.js to use the isolated profanity filter
- Add proper database initialization and cleanup methods
- Support in-memory databases for testing

Breaking changes:
- Profanity filter now uses a separate database file
- Updated admin API endpoints for profanity management
- Enhanced profanity detection capabilities
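For orientation, a minimal usage sketch of the ProfanityFilter API as exercised by the test file below (constructor taking a database path, loadCustomWords, addCustomWord, analyzeProfanity, close). The ./profanity.db path, the require path, and the wait-for-initialization delay mirror the tests rather than the actual server.js wiring, so treat them as illustrative assumptions, not the documented integration.

// Illustrative sketch only: the method calls mirror those exercised in the
// test suite below; paths and the setup delay are assumptions.
const ProfanityFilter = require('./profanity-filter');

async function checkReport(description) {
  // Dedicated SQLite file, kept separate from the main application database
  const filter = new ProfanityFilter('./profanity.db');

  // Give the asynchronous initialization time to finish (as the tests do),
  // then load custom words into the active word list
  await new Promise(resolve => setTimeout(resolve, 200));
  await filter.loadCustomWords();

  // Custom word management: addCustomWord(word, severity, category, addedBy)
  await filter.addCustomWord('examplebadword', 'high', 'custom', 'admin');
  await filter.loadCustomWords(); // reload so newly added words are detected

  // Analyze and filter a road-condition description
  const result = filter.analyzeProfanity(description);
  console.log(result.hasProfanity, result.severity, result.filtered);

  filter.close();
  return result;
}

checkReport('This damn road is slippery').catch(console.error);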
325 lines · 10 KiB · JavaScript
const sqlite3 = require('sqlite3').verbose();
const ProfanityFilter = require('../profanity-filter');

describe('ProfanityFilter', () => {
  let filter;

  beforeEach(async () => {
    // Create profanity filter with in-memory database
    filter = new ProfanityFilter(':memory:');

    // Wait for initialization to complete
    await new Promise(resolve => setTimeout(resolve, 200));

    // Ensure custom words are loaded
    await filter.loadCustomWords();
  });

  afterEach(() => {
    if (filter) {
      filter.close();
    }
  });

  describe('Basic Profanity Detection', () => {
    test('should detect single profanity word', () => {
      const result = filter.analyzeProfanity('This is shit');

      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBe(1);
      expect(result.severity).toBe('medium');
      expect(result.matches[0].word).toBe('shit');
    });

    test('should detect multiple profanity words', () => {
      const result = filter.analyzeProfanity('This fucking shit is damn terrible');

      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBe(3);
      expect(result.severity).toBe('medium');
      expect(result.matches.map(m => m.word)).toContain('fuck');
      expect(result.matches.map(m => m.word)).toContain('shit');
      expect(result.matches.map(m => m.word)).toContain('damn');
    });

    test('should not detect profanity in clean text', () => {
      const result = filter.analyzeProfanity('This road is very slippery with ice');

      expect(result.hasProfanity).toBe(false);
      expect(result.count).toBe(0);
      expect(result.severity).toBe('none');
      expect(result.matches).toHaveLength(0);
    });

    test('should handle empty or null input', () => {
      expect(filter.analyzeProfanity('')).toEqual({
        hasProfanity: false,
        matches: [],
        severity: 'none',
        count: 0,
        filtered: ''
      });

      expect(filter.analyzeProfanity(null)).toEqual({
        hasProfanity: false,
        matches: [],
        severity: 'none',
        count: 0,
        filtered: null
      });

      expect(filter.analyzeProfanity(undefined)).toEqual({
        hasProfanity: false,
        matches: [],
        severity: 'none',
        count: 0,
        filtered: undefined
      });
    });
  });

  describe('Leetspeak Detection', () => {
    test('should detect leetspeak profanity', () => {
      const testCases = [
        'This is sh1t',
        'F@ck this',
        'What the f*ck',
        'This is bull$hit',
        'D@mn it',
        'A$$hole behavior'
      ];

      testCases.forEach(text => {
        const result = filter.analyzeProfanity(text);
        expect(result.hasProfanity).toBe(true);
        expect(result.count).toBeGreaterThan(0);
      });
    });

    test('should detect spaced out words', () => {
      const result = filter.analyzeProfanity('f u c k this');
      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBeGreaterThan(0);
    });
  });

  describe('Severity Levels', () => {
    test('should classify high severity words correctly', () => {
      const highSeverityWords = ['kill', 'murder', 'terrorist', 'rape'];

      highSeverityWords.forEach(word => {
        const result = filter.analyzeProfanity(`This is ${word}`);
        expect(result.hasProfanity).toBe(true);
        expect(result.severity).toBe('high');
      });
    });

    test('should classify medium severity words correctly', () => {
      const mediumSeverityWords = ['fuck', 'shit', 'bitch'];

      mediumSeverityWords.forEach(word => {
        const result = filter.analyzeProfanity(`This is ${word}`);
        expect(result.hasProfanity).toBe(true);
        expect(result.severity).toBe('medium');
      });
    });

    test('should classify low severity words correctly', () => {
      const lowSeverityWords = ['damn', 'hell', 'crap'];

      lowSeverityWords.forEach(word => {
        const result = filter.analyzeProfanity(`This is ${word}`);
        expect(result.hasProfanity).toBe(true);
        expect(result.severity).toBe('low');
      });
    });

    test('should use highest severity when multiple words present', () => {
      const result = filter.analyzeProfanity('damn this fucking terrorist');
      expect(result.hasProfanity).toBe(true);
      expect(result.severity).toBe('high'); // terrorist is high severity
    });
  });

  describe('Text Filtering', () => {
    test('should filter profanity with asterisks', () => {
      const result = filter.analyzeProfanity('This is fucking shit');

      expect(result.filtered).toContain('***');
      expect(result.filtered).not.toContain('fuck');
      expect(result.filtered).not.toContain('shit');
    });

    test('should preserve clean parts of text', () => {
      const result = filter.analyzeProfanity('This damn road is slippery');

      expect(result.filtered).toContain('road is slippery');
      expect(result.filtered).toContain('***');
      expect(result.filtered).not.toContain('damn');
    });
  });

  describe('Custom Words Management', () => {
    test('should add custom profanity word', async () => {
      const result = await filter.addCustomWord('testword', 'medium', 'custom', 'admin');

      expect(result.word).toBe('testword');
      expect(result.severity).toBe('medium');
      expect(result.category).toBe('custom');
    });

    test('should prevent duplicate custom words', async () => {
      await filter.addCustomWord('testword', 'medium', 'custom', 'admin');

      await expect(
        filter.addCustomWord('testword', 'high', 'custom', 'admin')
      ).rejects.toThrow('Word already exists in the filter');
    });

    test('should detect custom words after reload', async () => {
      await filter.addCustomWord('customfoulword', 'high', 'custom', 'admin');
      await filter.loadCustomWords();

      const result = filter.analyzeProfanity('This is customfoulword');
      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBe(1);
      expect(result.severity).toBe('high');
    });

    test('should get all custom words', async () => {
      await filter.addCustomWord('word1', 'low', 'custom', 'admin');
      await filter.addCustomWord('word2', 'high', 'custom', 'admin');

      const words = await filter.getCustomWords();
      expect(words).toHaveLength(2);
      expect(words.map(w => w.word)).toContain('word1');
      expect(words.map(w => w.word)).toContain('word2');
    });

    test('should update custom word', async () => {
      const added = await filter.addCustomWord('updateword', 'low', 'custom', 'admin');

      const result = await filter.updateCustomWord(added.id, {
        word: 'updatedword',
        severity: 'high',
        category: 'updated'
      });

      expect(result.updated).toBe(true);
      expect(result.changes).toBe(1);
    });

    test('should remove custom word', async () => {
      const added = await filter.addCustomWord('removeword', 'medium', 'custom', 'admin');

      const result = await filter.removeCustomWord(added.id);
      expect(result.deleted).toBe(true);
      expect(result.changes).toBe(1);
    });

    test('should handle removing non-existent word', async () => {
      await expect(
        filter.removeCustomWord(99999)
      ).rejects.toThrow('Word not found');
    });
  });

  describe('Edge Cases', () => {
    test('should handle very long text', () => {
      const longText = 'This road is slippery '.repeat(100) + 'shit';
      const result = filter.analyzeProfanity(longText);

      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBe(1);
    });

    test('should handle text with only profanity', () => {
      const result = filter.analyzeProfanity('fuck');

      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBe(1);
      expect(result.filtered).toBe('****');
    });

    test('should handle mixed case profanity', () => {
      const result = filter.analyzeProfanity('This FUCKING road is SHIT');

      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBe(2);
    });

    test('should handle profanity with punctuation', () => {
      const result = filter.analyzeProfanity('Fuck! This shit, damn...');

      expect(result.hasProfanity).toBe(true);
      expect(result.count).toBe(3);
    });

    test('should not detect profanity in legitimate words containing profane substrings', () => {
      const legitimateWords = [
        'assessment', // contains 'ass'
        'classic', // contains 'ass'
        'assistance', // contains 'ass'
        'cassette' // contains 'ass'
      ];

      legitimateWords.forEach(word => {
        const result = filter.analyzeProfanity(`This is a ${word}`);
        expect(result.hasProfanity).toBe(false);
      });
    });
  });

  describe('Real-world Road Condition Examples', () => {
    test('should allow legitimate road condition descriptions', () => {
      const legitimateDescriptions = [
        'Multiple vehicles stuck due to black ice',
        'Road very slippery, saw 3 accidents this morning',
        'Ice forming on bridges, drive carefully',
        'Heavy snow, visibility poor',
        'Salt trucks active, conditions improving',
        'Watched 2 cars slide into ditch',
        'School buses delayed due to ice',
        'Emergency vehicles on scene',
        'Road closed between Main and Oak'
      ];

      legitimateDescriptions.forEach(description => {
        const result = filter.analyzeProfanity(description);
        expect(result.hasProfanity).toBe(false);
      });
    });

    test('should reject inappropriate descriptions', () => {
      const inappropriateDescriptions = [
        'This fucking road is terrible',
        'Shit everywhere, can\'t drive',
        'Damn ice caused accident',
        'These asshole drivers are crazy',
        'What the hell is wrong with road crews'
      ];

      inappropriateDescriptions.forEach(description => {
        const result = filter.analyzeProfanity(description);
        expect(result.hasProfanity).toBe(true);
        expect(result.count).toBeGreaterThan(0);
      });
    });
  });

  describe('Performance', () => {
    test('should handle multiple rapid analyses', () => {
      const startTime = Date.now();

      for (let i = 0; i < 100; i++) {
        filter.analyzeProfanity('This is a test message with some words');
      }

      const endTime = Date.now();
      const duration = endTime - startTime;

      // Should complete 100 analyses in under 1 second
      expect(duration).toBeLessThan(1000);
    });
  });
});