TeamVioletEdifai committed on
Commit 72a88eb · verified · 1 Parent(s): 77ab865

Upload cleaned_yelp_reviews(2).csv


Started with 100k (of 700k) records
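
Note: the snippet below loads only the first 100 rows as a quick smoke test; pulling the 100k subset mentioned above would presumably use a larger slice of the same split (the slice size here is assumed from this note, not shown in the code below):

```
dataset = load_dataset('yelp_review_full', split='train[:100000]')
df = dataset.to_pandas()
```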

code
```
# AI Training Week 2

# Task 1 ************************************************************************************************
!pip install pyarrow datasets
import pandas as pd
import pyarrow.parquet as pq
from datasets import load_dataset

# Load the dataset using Hugging Face Datasets
dataset = load_dataset('yelp_review_full', split='train[:100]')

# Convert to a pandas DataFrame
df = dataset.to_pandas()

print("Successfully loaded 100 rows.")
print(df.head())
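
# (Optional, hedged sketch) pyarrow.parquet is imported above but not used directly in
# this task; one possible use is persisting the raw slice. The file name below is
# illustrative only.
# import pyarrow as pa
# pq.write_table(pa.Table.from_pandas(df), 'raw_yelp_sample.parquet')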

# Task 2 ************************************************************************************************
# Install necessary packages
!pip install imbalanced-learn langdetect nltk pyarrow

import pandas as pd
import pyarrow.parquet as pq
import matplotlib.pyplot as plt
import seaborn as sns
from langdetect import detect, LangDetectException
from imblearn.over_sampling import SMOTE
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Download the required NLTK data (only needed on the first run)
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('punkt_tab')

# Define stop words
stop_words = set(stopwords.words('english'))

# Function to tokenize and remove stop words
def preprocess_text(text):
    if isinstance(text, str):
        # Tokenize the text
        tokens = word_tokenize(text.lower())
        # Remove stop words and non-alphabetic tokens
        filtered_tokens = [word for word in tokens if word.isalpha() and word not in stop_words]
        # Rejoin tokens into a single string
        return ' '.join(filtered_tokens)
    else:
        return ''

# Glance at the dataset by showing the first few rows.
if not df.empty:
    print(df.head())
else:
    print("DataFrame is empty due to loading error.")

# Visualization: Distribution of Ratings
if not df.empty:
    sns.countplot(x='label', hue='label', data=df, palette='viridis', legend=False)
    plt.title('Distribution of Ratings')
    plt.xlabel('Rating')
    plt.ylabel('Count')
    plt.show()

# Generate descriptive statistics.
print("Descriptive Statistics:")
print(df.describe())
print("\n")

# Filter by language: Remove reviews in languages that you don’t want to include in the analysis.
def safe_detect(text):
    try:
        return detect(text)
    except LangDetectException:
        return 'unknown'

df['language'] = df['text'].apply(lambda x: safe_detect(x) if isinstance(x, str) else 'unknown')
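# (Optional sanity check) Inspect which languages langdetect assigned before filtering:
# print(df['language'].value_counts())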
df = df[df['language'] == 'en']
print(f"After language filtering (en): {len(df)} rows\n")

# Remove short reviews: Filter out reviews with too few words (e.g., less than 3 words).
df['word_count'] = df['text'].apply(lambda x: len(x.split()) if isinstance(x, str) else 0)
df = df[df['word_count'] >= 3]
print(f"After removing short reviews (>=3 words): {len(df)} rows\n")

# Remove stop words and tokenize the text
df['cleaned_text'] = df['text'].apply(preprocess_text)

# Optionally, update the word_count to reflect the cleaned text
df['cleaned_word_count'] = df['cleaned_text'].apply(lambda x: len(x.split()))
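
# (Optional sanity check) Compare average review length before and after cleaning:
# print(df[['word_count', 'cleaned_word_count']].mean())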

# Handle unbalanced data: Identify whether the data is skewed toward positive or negative reviews and apply balancing techniques.
print("Original label distribution:")
print(df['label'].value_counts())
print("\n")

# Convert text to numerical features using TF-IDF
vectorizer = TfidfVectorizer(max_features=5000)
X = vectorizer.fit_transform(df['cleaned_text'])
y = df['label']

# Apply SMOTE to balance the dataset
smote = SMOTE(random_state=42)
X_res, y_res = smote.fit_resample(X, y)

print("Resampled label distribution:")
print(pd.Series(y_res).value_counts())
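
# Note: X_res / y_res are only inspected here to confirm the classes can be balanced;
# Task 4 re-applies SMOTE on the training split alone, so no resampled rows leak into
# the test set.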

# Save the cleaned and processed data
df.to_csv('cleaned_yelp_reviews.csv', index=False)
# df.to_parquet('cleaned_yelp_reviews.parquet', index=False)


# Task 3 ************************************************************************************************
# Create categorical variables from relevant features.
df['label'] = df['label'].astype('category')

# Optimize your features and ensure they're ready for modelling.
from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(max_features=5000)
X = vectorizer.fit_transform(df['text'])
y = df['label']
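
# Note: TF-IDF is re-fit here on the raw 'text' column; if the Task 2 cleaning is meant
# to carry over, df['cleaned_text'] could be used instead.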

# Review and summarise feature statistics.
print(df.describe())
print(f'Number of TF-IDF features: {X.shape[1]}')
sparsity = 100.0 * X.nnz / (X.shape[0] * X.shape[1])
print(f'Sparsity of the TF-IDF matrix: {sparsity:.2f}%')
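
# (Optional, hedged sketch) Peek at part of the learned vocabulary;
# get_feature_names_out() is the standard scikit-learn accessor for this.
# print(vectorizer.get_feature_names_out()[:20])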

# Task 4 ************************************************************************************************
# Import necessary libraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, confusion_matrix
from sklearn.dummy import DummyClassifier
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)

# Handle unbalanced data using SMOTE on the training set
from imblearn.over_sampling import SMOTE
smote = SMOTE(random_state=42)
X_train_res, y_train_res = smote.fit_resample(X_train, y_train)
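
# Resampling only the training split keeps the test set untouched, so the evaluation
# below reflects the original class distribution.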

# 1. Implement a simple baseline model (Logistic Regression)
baseline_model = LogisticRegression(max_iter=1000, random_state=42)
baseline_model.fit(X_train_res, y_train_res)

# Predict on the test set
y_pred = baseline_model.predict(X_test)

# 2. Evaluate the model and print out key metrics
print("=== Logistic Regression Model Evaluation ===")
print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
print(f"Precision: {precision_score(y_test, y_pred, average='weighted'):.4f}")
print(f"Recall: {recall_score(y_test, y_pred, average='weighted'):.4f}")
print(f"F1-Score: {f1_score(y_test, y_pred, average='weighted'):.4f}\n")
print("Classification Report:")
print(classification_report(y_test, y_pred))

# Plot Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(8,6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=baseline_model.classes_, yticklabels=baseline_model.classes_)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix')
plt.show()

# 3. Compare this baseline to a random classifier
random_clf = DummyClassifier(strategy='uniform', random_state=42)
random_clf.fit(X_train_res, y_train_res)
y_random = random_clf.predict(X_test)

print("=== Random Classifier Evaluation ===")
print(f"Accuracy: {accuracy_score(y_test, y_random):.4f}")
print(f"Precision: {precision_score(y_test, y_random, average='weighted'):.4f}")
print(f"Recall: {recall_score(y_test, y_random, average='weighted'):.4f}")
print(f"F1-Score: {f1_score(y_test, y_random, average='weighted'):.4f}\n")
print("Classification Report:")
print(classification_report(y_test, y_random))
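
# With 5 roughly balanced star ratings, a uniform random classifier should land near
# 20% accuracy, so the gap to the logistic regression scores above indicates how much
# real signal the TF-IDF baseline captures.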

# Plot Confusion Matrix for Random Classifier
cm_random = confusion_matrix(y_test, y_random)
plt.figure(figsize=(8,6))
sns.heatmap(cm_random, annot=True, fmt='d', cmap='Greens', xticklabels=random_clf.classes_, yticklabels=random_clf.classes_)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix - Random Classifier')
plt.show()
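
# (Optional, hedged sketch) Collect both models' headline metrics in one small table
# for a side-by-side comparison; everything below reuses objects defined above, and
# the 'summary' variable name is illustrative only.
summary = pd.DataFrame({
    'model': ['logistic_regression', 'random_baseline'],
    'accuracy': [accuracy_score(y_test, y_pred), accuracy_score(y_test, y_random)],
    'f1_weighted': [f1_score(y_test, y_pred, average='weighted'),
                    f1_score(y_test, y_random, average='weighted')],
})
print(summary)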
```

Files changed (2)
  1. .gitattributes +1 -0
  2. cleaned_yelp_reviews(2).csv +3 -0
.gitattributes CHANGED
@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56   # Video files - compressed
57   *.mp4 filter=lfs diff=lfs merge=lfs -text
58   *.webm filter=lfs diff=lfs merge=lfs -text
59 + cleaned_yelp_reviews(2).csv filter=lfs diff=lfs merge=lfs -text
cleaned_yelp_reviews(2).csv ADDED
@@ -0,0 +1,3 @@
1 + version https://git-lfs.github.com/spec/v1
2 + oid sha256:ee7282582e610bfdfd757dd620057e306205ba994c7fc62ea94ef84cb8aacdfd
3 + size 110563999