from fastapi import APIRouter, Query, HTTPException
from better_profanity import profanity

# Create a router for the profanity API
router = APIRouter()


@router.get("/profanity/check/")
def check_profanity(text: str = Query(..., description="Text to be checked")):
    """
    Check if the text contains profanity and return details.
    """
    try:
        # Load the default censor word dictionary
        profanity.load_censor_words()

        # Detect profanity and build the censored version of the text
        contains_profanity = profanity.contains_profanity(text)
        censored_text = profanity.censor(text)

        # Extract the individual offensive words
        words = text.split()
        offensive_words = [
            word for word in words if profanity.contains_profanity(word)
        ]

        # Calculate the percentage of offensive words
        offensive_word_count = len(offensive_words)
        total_word_count = len(words)
        offensive_percentage = (
            (offensive_word_count / total_word_count) * 100 if total_word_count > 0 else 0
        )

        # Return the response payload
        return {
            "original_text": text,
            "contains_profanity": contains_profanity,
            "censored_text": censored_text,
            "offensive_words": offensive_words,
            "offensive_word_count": offensive_word_count,
            "total_word_count": total_word_count,
            "offensive_percentage": offensive_percentage,
        }
    except Exception as e:
        # Handle errors gracefully
        raise HTTPException(
            status_code=500,
            detail=f"An error occurred while processing the text: {str(e)}"
        )