Upload files to "/"

This commit is contained in:
202210715095 FAHRIZAL SETIAWAN 2025-11-22 08:46:59 +07:00
parent 7ce80b6099
commit 12c1b65bb4


@ -0,0 +1,318 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "qBYcPYAb059g",
"outputId": "ac27d686-2d15-4b2f-cc13-963fadf3100f"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Masukkan jumlah dokumen yang ingin dimasukkan: 4\n"
]
}
],
"source": [
"# Input jumlah dokumen\n",
"import pandas as pd\n",
"n = int(input(\"Masukkan jumlah dokumen yang ingin dimasukkan: \"))"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "mo-yt5Ob1N8j",
"outputId": "21da9ff1-2954-4b39-b207-017d03d0294f"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Masukkan teks untuk dokumen ke-1: saya ingin memasak\n",
"Masukkan teks untuk dokumen ke-2: masak ayam goreng sepertinya enak\n",
"Masukkan teks untuk dokumen ke-3: enakan ayam goreng atau ikan goreng\n",
"Masukkan teks untuk dokumen ke-4: dibarengi dengan saus sepertinya akan lezat\n",
"\n",
"=== Dokumen yang Dimasukkan ===\n",
"Doc 1: saya ingin memasak\n",
"Doc 2: masak ayam goreng sepertinya enak\n",
"Doc 3: enakan ayam goreng atau ikan goreng\n",
"Doc 4: dibarengi dengan saus sepertinya akan lezat\n"
]
}
],
"source": [
"# Input teks dokumen satu per satu\n",
"documents = []\n",
"for i in range(n):\n",
" teks = input(f\"Masukkan teks untuk dokumen ke-{i+1}: \")\n",
" documents.append(teks)\n",
"\n",
"print(\"\\n=== Dokumen yang Dimasukkan ===\")\n",
"for i, doc in enumerate(documents):\n",
" print(f\"Doc {i+1}: {doc}\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "FkmxRAFq1oDK",
"outputId": "e451e801-161a-4618-f047-97893cc7a68b"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n",
"=== Hasil Tokenisasi ===\n",
"Doc 1: ['saya', 'ingin', 'memasak']\n",
"Doc 2: ['masak', 'ayam', 'goreng', 'sepertinya', 'enak']\n",
"Doc 3: ['enakan', 'ayam', 'goreng', 'atau', 'ikan', 'goreng']\n",
"Doc 4: ['dibarengi', 'dengan', 'saus', 'sepertinya', 'akan', 'lezat']\n"
]
}
],
"source": [
"# Tahap Tokenisasi\n",
"tokenized_docs = []\n",
"for doc in documents:\n",
" tokens = doc.lower().split()\n",
" tokenized_docs.append(tokens)\n",
"\n",
"print(\"\\n=== Hasil Tokenisasi ===\")\n",
"for i, tokens in enumerate(tokenized_docs):\n",
" print(f\"Doc {i+1}: {tokens}\")"
]
},
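{
"cell_type": "markdown",
"metadata": {},
"source": [
"The whitespace split above works here because the sample documents contain no punctuation. Below is a minimal sketch of a more robust tokenizer using only the standard-library `re` module; it is an optional addition, not part of the original pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: regex tokenization that also strips punctuation (sketch)\n",
"import re\n",
"\n",
"regex_tokenized = [re.findall(r\"\\w+\", doc.lower()) for doc in documents]\n",
"\n",
"# For the punctuation-free sample documents this matches the split-based result\n",
"print(regex_tokenized == tokenized_docs)"
]
},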
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "ybC1Vo2C_c3q",
"outputId": "f1e97af1-3af9-4dee-b59a-2a8baa79a370"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n",
"=== Corpus Keseluruhan (Semua Kata dari Semua Dokumen) ===\n",
"['saya', 'ingin', 'memasak', 'masak', 'ayam', 'goreng', 'sepertinya', 'enak', 'enakan', 'ayam', 'goreng', 'atau', 'ikan', 'goreng', 'dibarengi', 'dengan', 'saus', 'sepertinya', 'akan', 'lezat']\n",
"Jumlah total kata dalam seluruh dokumen: 20\n"
]
}
],
"source": [
"# Pembuatan Corpus\n",
"corpus_all = [word for doc in tokenized_docs for word in doc]\n",
"\n",
"print(\"\\n=== Corpus Keseluruhan (Semua Kata dari Semua Dokumen) ===\")\n",
"print(corpus_all)\n",
"print(f\"Jumlah total kata dalam seluruh dokumen: {len(corpus_all)}\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "s6S-Ma4R1xuq",
"outputId": "7643748e-937e-4724-8db0-0a768ad7182f"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n",
"=== Vocabulary (Kata Unik) ===\n",
"['akan', 'atau', 'ayam', 'dengan', 'dibarengi', 'enak', 'enakan', 'goreng', 'ikan', 'ingin', 'lezat', 'masak', 'memasak', 'saus', 'saya', 'sepertinya']\n",
"Jumlah kata unik (vocabulary size): 16\n",
"\n",
"=== Vocabulary (Kata Unik) ===\n",
" 1. akan\n",
" 2. atau\n",
" 3. ayam\n",
" 4. dengan\n",
" 5. dibarengi\n",
" 6. enak\n",
" 7. enakan\n",
" 8. goreng\n",
" 9. ikan\n",
"10. ingin\n",
"11. lezat\n",
"12. masak\n",
"13. memasak\n",
"14. saus\n",
"15. saya\n",
"16. sepertinya\n",
"\n",
"Jumlah kata unik (vocabulary size): 16\n"
]
}
],
"source": [
"# Pembuatan Vocabulary\n",
"vocabulary = sorted(set(corpus_all))\n",
"\n",
"print(\"\\n=== Vocabulary (Kata Unik) ===\")\n",
"print(vocabulary)\n",
"print(f\"Jumlah kata unik (vocabulary size): {len(vocabulary)}\")\n",
"\n",
"\n",
"vocabulary = sorted(set(corpus_all))\n",
"\n",
"print(\"\\n=== Vocabulary (Kata Unik) ===\")\n",
"for idx, word in enumerate(vocabulary, start=1):\n",
" print(f\"{idx:>2}. {word}\")\n",
"print(f\"\\nJumlah kata unik (vocabulary size): {len(vocabulary)}\")"
]
},
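{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small optional helper before building the matrix: an explicit word-to-column mapping. The loop in the next cell recounts with `doc.count(word)` for each vocabulary word, which is fine at this size; a dict lookup is the usual approach once vocabularies grow. This is a sketch built only from the `vocabulary` list above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: map each vocabulary word to its column index (sketch)\n",
"word_to_index = {word: idx for idx, word in enumerate(vocabulary)}\n",
"print(word_to_index[\"goreng\"])  # column of 'goreng' in the BoW matrix"
]
},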
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ShevCTva2Fg9"
},
"outputs": [],
"source": [
"# Representasi Numerik (Matriks BoW)\n",
"bow_matrix = []\n",
"for doc in tokenized_docs:\n",
" vector = [doc.count(word) for word in vocabulary]\n",
" bow_matrix.append(vector)"
]
},
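{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a cross-check, the same matrix can be produced with scikit-learn's `CountVectorizer`. This is a sketch that assumes scikit-learn is installed; `token_pattern=r\"\\S+\"` mimics the plain whitespace split used above, and `lowercase=True` matches the `.lower()` call."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional cross-check with scikit-learn (sketch; assumes scikit-learn is installed)\n",
"from sklearn.feature_extraction.text import CountVectorizer\n",
"\n",
"vectorizer = CountVectorizer(lowercase=True, token_pattern=r\"\\S+\")\n",
"X = vectorizer.fit_transform(documents)\n",
"\n",
"# Should reproduce the sorted vocabulary and the manual bow_matrix\n",
"print(list(vectorizer.get_feature_names_out()) == vocabulary)\n",
"print(X.toarray().tolist() == bow_matrix)"
]
},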
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "-yB6D2pY2M0E",
"outputId": "b6b2f4d3-da8b-4aee-e9ce-034def4d5cf7"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"=== Matriks Bag of Words ===\n",
" ai belajar dan data di kampus mahasiswa nlp saya science suka\n",
"D1 0 1 0 0 1 1 0 1 1 0 0\n",
"D2 1 1 0 0 0 0 0 0 1 0 1\n",
"D3 0 1 1 1 0 0 1 1 0 1 0\n"
]
}
],
"source": [
"df_bow = pd.DataFrame(bow_matrix, columns=vocabulary)\n",
"df_bow.index = [f\"D{i}\" for i in range(1, len(documents)+1)] # ubah label indeks jadi D1, D2, D3\n",
"\n",
"print(\"\\n=== Matriks Bag of Words ===\")\n",
"print(df_bow)"
]
},
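{
"cell_type": "markdown",
"metadata": {},
"source": [
"To read the matrix: each row is one document, each column one vocabulary word, and each entry the word's count in that document. A minimal sketch for inspecting a single document's nonzero counts with standard pandas indexing:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: show only the nonzero counts for one document (sketch)\n",
"row = df_bow.loc[\"D3\"]\n",
"print(row[row > 0])"
]
},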
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "8ruf5vKL2rGD",
"outputId": "65a4674e-1c01-4833-ec55-f66f77b8b6c2"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"=== Tabel Frekuensi Kata (Keseluruhan Dokumen) ===\n",
" Kata Frekuensi\n",
"0 belajar 3\n",
"1 nlp 2\n",
"2 saya 2\n",
"3 dan 1\n",
"4 ai 1\n",
"5 data 1\n",
"6 di 1\n",
"7 mahasiswa 1\n",
"8 kampus 1\n",
"9 science 1\n",
"10 suka 1\n",
"Frekuensi kata: 11\n"
]
}
],
"source": [
"# Membuat Tabel Frekuensi Kata (Total dari seluruh dokumen)\n",
"word_frequencies = df_bow.sum().sort_values(ascending=False).reset_index()\n",
"word_frequencies.columns = [\"Kata\", \"Frekuensi\"]\n",
"\n",
"print(\"\\n=== Tabel Frekuensi Kata (Keseluruhan Dokumen) ===\")\n",
"print(word_frequencies)\n",
"print(f\"Frekuensi kata: {len(word_frequencies)}\")"
]
},
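{
"cell_type": "markdown",
"metadata": {},
"source": [
"The same totals can be read straight off the flat corpus with the standard library's `collections.Counter`; a minimal sketch, useful as a sanity check on the column sums above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: word frequencies via Counter, directly from corpus_all (sketch)\n",
"from collections import Counter\n",
"\n",
"freq = Counter(corpus_all)\n",
"for word, count in freq.most_common():\n",
"    print(f\"{word}: {count}\")"
]
}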
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
}
},
"nbformat": 4,
"nbformat_minor": 0
}