# import libraries
try:
    # %tensorflow_version only exists in Colab.
    !pip install tf-nightly
except Exception:
    pass
import tensorflow as tf
import pandas as pd
from tensorflow import keras
!pip install tensorflow-datasets
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
import requests
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import EarlyStopping  # only used in the optional early-stopping sketch further down
from tensorflow.keras.layers import Embedding, LSTM, Bidirectional
print(tf.__version__)
def download_file(url, local_filename):
    """Stream a file from url to local_filename on disk."""
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return local_filename
train_url = "https://cdn.freecodecamp.org/project-data/sms/train-data.tsv"
test_url = "https://cdn.freecodecamp.org/project-data/sms/valid-data.tsv"
train_file_path = "train-data.tsv"
test_file_path = "valid-data.tsv"
download_file(train_url, train_file_path)
download_file(test_url, test_file_path)
print(f"Downloaded {train_file_path} and {test_file_path}")
Downloaded train-data.tsv and valid-data.tsv
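# Optional sanity check (my addition, not part of the original project):
# confirm both files landed on disk and peek at the first training row,
# which should look like "<label>\t<message>".
import os
for path in (train_file_path, test_file_path):
    print(path, os.path.getsize(path), "bytes")
with open(train_file_path) as f:
    print(f.readline().rstrip())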
train_data = pd.read_csv(train_file_path, sep='\t', header=None, names=['label', 'text'])
test_data = pd.read_csv(test_file_path, sep='\t', header=None, names=['label', 'text'])
train_data['label'] = train_data['label'].map({'ham': 0, 'spam': 1})
test_data['label'] = test_data['label'].map({'ham': 0, 'spam': 1})
print("NaN values in train labels:", train_data['label'].isna().sum())
print("NaN values in test labels:", test_data['label'].isna().sum())
tokenizer = Tokenizer(num_words=10000, oov_token='<OOV>')
tokenizer.fit_on_texts(train_data['text'])
x_train = tokenizer.texts_to_sequences(train_data['text'])
x_test = tokenizer.texts_to_sequences(test_data['text'])
x_train = pad_sequences(x_train, padding='post', maxlen=100)
x_test = pad_sequences(x_test, padding='post', maxlen=100)
y_train = train_data['label'].values
y_test = test_data['label'].values
NaN values in train labels: 0
NaN values in test labels: 0
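# Optional inspection (my addition): the SMS corpus is imbalanced, so raw
# accuracy flatters the model; value_counts() shows the ham/spam split.
# Printing one row also illustrates what fit_on_texts/pad_sequences produced:
# a variable-length list of token ids, post-padded with zeros to maxlen=100.
print(train_data['label'].value_counts())
print(train_data['text'].iloc[0])
print(tokenizer.texts_to_sequences([train_data['text'].iloc[0]])[0])
print(x_train[0])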
model = tf.keras.Sequential([
    Embedding(input_dim=10000, output_dim=128),        # 128-d vectors for the 10k-word vocabulary
    Bidirectional(LSTM(128, return_sequences=True)),   # read each message in both directions
    tf.keras.layers.GlobalMaxPooling1D(),              # collapse the time axis to one vector
    tf.keras.layers.Dense(128, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, activation='sigmoid')     # spam probability
])
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)
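# Optional (my addition): build the model so summary() can report shapes.
# The Bidirectional LSTM doubles the feature width (128 -> 256) before
# GlobalMaxPooling1D collapses the time axis to a single 256-d vector.
model.build(input_shape=(None, 100))
model.summary()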
# Train the model
model.fit(x_train, y_train, epochs=10)
# Evaluate the model
print("---------------------------------")
test_loss, test_accuracy = model.evaluate(x_test, y_test)
print(f"Test Loss: {test_loss}")
print(f"Test Accuracy: {test_accuracy}")
Epoch 1/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 7s 37ms/step - accuracy: 0.8489 - loss: 1.9531
Epoch 2/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 37ms/step - accuracy: 0.9805 - loss: 0.2765
Epoch 3/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 37ms/step - accuracy: 0.9831 - loss: 0.1469
Epoch 4/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 36ms/step - accuracy: 0.9932 - loss: 0.0775
Epoch 5/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 36ms/step - accuracy: 0.9905 - loss: 0.0730
Epoch 6/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 36ms/step - accuracy: 0.9974 - loss: 0.0584
Epoch 7/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 37ms/step - accuracy: 0.9990 - loss: 0.0363
Epoch 8/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 36ms/step - accuracy: 0.9989 - loss: 0.0356
Epoch 9/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 37ms/step - accuracy: 0.9994 - loss: 0.0289
Epoch 10/10
131/131 ━━━━━━━━━━━━━━━━━━━━ 5s 36ms/step - accuracy: 0.9965 - loss: 0.0416
---------------------------------
44/44 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - accuracy: 0.9827 - loss: 0.1173
Test Loss: 0.11725916713476181
Test Accuracy: 0.9813218116760254
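# Optional variant (my sketch, not what produced the log above): EarlyStopping
# is imported but unused; uncomment to train against a held-out split and stop
# when validation loss stalls. patience=2 and validation_split=0.2 are
# illustrative choices, not tuned values.
# early_stop = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
# model.fit(x_train, y_train, epochs=20, validation_split=0.2, callbacks=[early_stop])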
# function to predict messages based on model
# (should return list containing prediction and label, ex. [0.008318834938108921, 'ham'])
def predict_message(pred_text):
    pred_sequence = tokenizer.texts_to_sequences([pred_text])
    pred_padded = pad_sequences(pred_sequence, padding='post', maxlen=100)
    prediction = model.predict(pred_padded)[0][0]  # the spam probability
    label = 'ham' if prediction < 0.5 else 'spam'
    return [float(prediction), label]
pred_text = "how are you doing today?"
prediction = predict_message(pred_text)
print(prediction)
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 177ms/step
[0.001112144673243165, 'ham']
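# Optional extra check (my addition): a message with obvious spam markers
# should land on the other side of the 0.5 threshold. The text below is
# made up for illustration.
print(predict_message("WINNER!! you have been selected for a free prize, call now to claim"))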
# Run this cell to test your function and model. Do not modify contents.
def test_predictions():
    test_messages = ["how are you doing today",
                     "sale today! to stop texts call 98912460324",
                     "i dont want to go. can we try it a different day? available sat",
                     "our new mobile video service is live. just install on your phone to start watching.",
                     "you have won £1000 cash! call to claim your prize.",
                     "i'll bring it tomorrow. don't forget the milk.",
                     "wow, is your arm alright. that happened to me one time too"
                     ]
    test_answers = ["ham", "spam", "ham", "spam", "spam", "ham", "ham"]
    passed = True
    for msg, ans in zip(test_messages, test_answers):
        prediction = predict_message(msg)
        if prediction[1] != ans:
            print("false")
            print(prediction[0])
            passed = False
    if passed:
        print("You passed the challenge. Great job!")
    else:
        print("You haven't passed yet. Keep trying.")
test_predictions()
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 24ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 24ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 23ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 16ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 16ms/step
You passed the challenge. Great job!
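# Optional (my addition): persist the trained model and tokenizer so the
# classifier can be reloaded without retraining. The filenames are my own
# choice; the native .keras format is supported in recent TF releases.
import pickle
model.save("sms_spam_model.keras")
with open("tokenizer.pkl", "wb") as f:
    pickle.dump(tokenizer, f)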