-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathai_text_classifier.py
More file actions
84 lines (66 loc) · 2.6 KB
/
ai_text_classifier.py
File metadata and controls
84 lines (66 loc) · 2.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
# -*- coding: utf-8 -*-
"""AI-Text-Classifier.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1gj3uMSilBy_r53g1k3hBYiANMfbIWdb7
"""
# Colab-only shell escape: installs CatBoost into the notebook runtime.
# (Invalid syntax outside IPython/Jupyter — remove if running as a plain script.)
!pip install catboost
import pandas as pd
import re
import nltk
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from catboost import CatBoostClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from collections import Counter
# Download the NLTK stopword corpus (no-op if already present).
nltk.download('stopwords')
# Load your dataset (replace with your actual file path).
data = pd.read_csv('/content/hippoCorpusV2.csv')
# Check class distribution before filtering.
print("Class distribution before filtering:", Counter(data['memType']))
# Map the three memType classes to integer labels.
# NOTE(review): any other memType value becomes a NaN label — confirm the CSV
# contains only these three classes before training.
data['label'] = data['memType'].map({'recalled': 0, 'imagined': 1, 'retold': 2})
# English stopword set used by preprocess_text below.
stop_words = set(nltk.corpus.stopwords.words('english'))
# Preprocess text function
def preprocess_text(text):
    """Normalize one story for TF-IDF: lowercase, letters-only, no stopwords.

    Parameters
    ----------
    text : str
        Raw story text. Non-string input (e.g. float NaN that pandas uses
        for missing cells) is treated as an empty story instead of raising
        AttributeError on ``.lower()``.

    Returns
    -------
    str
        Space-joined lowercase tokens with punctuation/digits and English
        stopwords (module-level ``stop_words``) removed.
    """
    # Guard: pandas encodes missing stories as float NaN; the original code
    # crashed on them with AttributeError.
    if not isinstance(text, str):
        return ''
    text = text.lower()                       # case-fold
    text = re.sub(r'[^a-zA-Z\s]', '', text)   # strip punctuation and numbers
    # Tokenize on whitespace and drop stopwords in one pass.
    tokens = [word for word in text.split() if word not in stop_words]
    return ' '.join(tokens)
# --- Feature preparation, train/test split, model fit and evaluation -------

# Clean every story once up front so the split and the model both see the
# same preprocessed text.
data['processed_story'] = data['story'].apply(preprocess_text)

# Sanity-check the label counts produced by the memType mapping above.
print("Class distribution after mapping:", Counter(data['label']))

# Hold out 20% of the stories for evaluation; stratifying on the label keeps
# class proportions identical in both halves.
features = data['processed_story']
labels = data['label']
X_train, X_test, y_train, y_test = train_test_split(
    features,
    labels,
    test_size=0.2,
    stratify=labels,
    random_state=42,
)

# TF-IDF features feeding a gradient-boosted tree classifier.
model = make_pipeline(
    TfidfVectorizer(),
    CatBoostClassifier(
        iterations=500,
        learning_rate=0.1,
        depth=6,
        verbose=0,  # keep CatBoost's per-iteration logging quiet
        random_seed=42,
    ),
)

# Fit on the training split only.
model.fit(X_train, y_train)

# Per-class precision/recall/F1 on the held-out stories.
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred, target_names=['recalled', 'imagined', 'retold']))
def predict_story_category():
    """Prompt the user for a story and print the model's predicted class.

    Reads one story from stdin, applies the same preprocessing used at
    training time, and maps the model's integer prediction back to its
    class name (0 -> recalled, 1 -> imagined, 2 -> retold).
    """
    story = input("Please enter your story: ")
    processed = preprocess_text(story)
    result = model.predict([processed])
    # result[0] holds the predicted integer label for the single input.
    code = result[0]
    if code == 0:
        category = 'recalled'
    elif code == 1:
        category = 'imagined'
    else:
        category = 'retold'
    print(f"\nThe model predicts that the story is '{category}'.")
# Example usage: interactively classify one story typed by the user.
predict_story_category()