Praktikum_NLP/Klasifikasi Teks FNN.ipynb
2025-11-22 11:10:28 +07:00

170 lines
12 KiB
Plaintext

{
"cells": [
{
"cell_type": "markdown",
"id": "f4a1399a-f23d-4060-a07e-bce5a5c7ddac",
"metadata": {},
"source": [
"# Klasifikasi Teks menggunakan ANN\n",
"## Arif R Dwiyanto\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "53a214ae-c9cf-4d46-925d-068f1685537b",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# ---------------------------------------------------------\n",
"# Klasifikasi Teks dengan TF-IDF + Feedforward Neural Network\n",
"# ---------------------------------------------------------\n",
"\n",
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"from sklearn.neural_network import MLPClassifier\n",
"from sklearn.metrics import classification_report, confusion_matrix\n",
"\n",
"# -----------------------------------------\n",
"# 1. Contoh Dataset\n",
"# -----------------------------------------\n",
"# Anda bisa mengganti dataset ini dengan dataset lain (CSV, JSON, dll)\n",
"\n",
"data = {\n",
" \"text\": [\n",
" \"Tempat ini sangat nyaman dan bersih.\",\n",
" \"Akses menuju ke sana cukup sulit dan membingungkan.\",\n",
" \"Pelayanan staf di sini juga sangat ramah dan cepat tanggap.\",\n",
" \"Lokasi kafe ini strategis dan mudah ditemukan.\",\n",
" \"Suasananya kadang terlalu bising karena sering ada keramaian.\",\n",
" \"Pilihan menu minumannya sangat beragam dan lezat.\"\n",
" ],\n",
" \"label\": [\"positive\", \"negative\", \"positive\", \"negative\", \"positive\", \"negative\"]\n",
"}\n",
"\n",
"df = pd.DataFrame(data)\n",
"\n",
"# -----------------------------------------\n",
"# 2. Split Train & Test\n",
"# -----------------------------------------\n",
"X_train, X_test, y_train, y_test = train_test_split(\n",
" df[\"text\"], df[\"label\"], test_size=0.3, random_state=42\n",
")\n",
"\n",
"# -----------------------------------------\n",
"# 3. TF-IDF Vectorization\n",
"# -----------------------------------------\n",
"tfidf = TfidfVectorizer(max_features=5000)\n",
"X_train_tfidf = tfidf.fit_transform(X_train)\n",
"X_test_tfidf = tfidf.transform(X_test)\n",
"\n",
"# -----------------------------------------\n",
"# 4. Feedforward ANN (MLPClassifier)\n",
"# -----------------------------------------\n",
"model = MLPClassifier(\n",
" hidden_layer_sizes=(256, 64),\n",
" activation='relu',\n",
" solver='adam',\n",
" max_iter=500,\n",
" random_state=42\n",
")\n",
"\n",
"model.fit(X_train_tfidf, y_train)\n",
"\n",
"# -----------------------------------------\n",
"# 5. Evaluasi Model\n",
"# -----------------------------------------\n",
"y_pred = model.predict(X_test_tfidf)\n",
"\n",
"print(\"=== Classification Report ===\")\n",
"print(classification_report(y_test, y_pred))\n",
"\n",
"print(\"=== Confusion Matrix ===\")\n",
"print(confusion_matrix(y_test, y_pred))\n",
"\n",
"# -----------------------------------------\n",
"# 6. Prediksi Teks Baru\n",
"# -----------------------------------------\n",
"#sample_text = [\"barang bagus luar biasa\"]\n",
"sample_text = [\"Tempat nyaman, saya suka\"]\n",
"sample_vec = tfidf.transform(sample_text)\n",
"prediction = model.predict(sample_vec)\n",
"\n",
"print(\"\\nPrediksi untuk:\", sample_text[0])\n",
"print(\"Hasil:\", prediction[0])\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9f7d90fe-4af4-446c-9547-c9312bfa6fc7",
"metadata": {},
"outputs": [],
"source": [
"#sample_text = [\"barang bagus luar biasa\"]\n",
"sample_text = [\"Tempat bising saya tidak suka\"]\n",
"sample_vec = tfidf.transform(sample_text)\n",
"prediction = model.predict(sample_vec)\n",
"print(\"\\nPrediksi untuk:\", sample_text[0])\n",
"print(\"Hasil:\", prediction[0])\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0413b4bf-beb1-483b-a081-b540fce1b21c",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "d714bd96-09a0-4439-8286-0cb39e2fb4df",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}