import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense

# Toy corpus and character-level vocabulary
text = "This is a sample text for language modeling using RNN."
chars = sorted(set(text))
char_to_index = {char: index for index, char in enumerate(chars)}
index_to_char = {index: char for index, char in enumerate(chars)}
text_indices = [char_to_index[char] for char in text]

# Sliding-window training pairs: 20 characters in, the next character as the target
seq_length = 20
sequences = []
next_chars = []
for i in range(0, len(text_indices) - seq_length):
    sequences.append(text_indices[i : i + seq_length])
    next_chars.append(text_indices[i + seq_length])
X = np.array(sequences)
y = np.array(next_chars)

# Embedding -> single SimpleRNN layer -> softmax over the vocabulary
model = Sequential([
    Embedding(input_dim=len(chars), output_dim=50, input_length=seq_length),
    SimpleRNN(100, return_sequences=False),
    Dense(len(chars), activation="softmax"),
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
model.fit(X, y, batch_size=64, epochs=50)

# Greedy character-by-character generation from a seed string
seed_text = "This is a sample te"
generated_text = seed_text
num_chars_to_generate = 100
for _ in range(num_chars_to_generate):
    seed_indices = [char_to_index[char] for char in seed_text]

    # Left-pad with index 0 if the seed is shorter than the training window
    if len(seed_indices) < seq_length:
        diff = seq_length - len(seed_indices)
        seed_indices = [0] * diff + seed_indices

    seed_indices = np.array(seed_indices).reshape(1, -1)
    next_index = model.predict(seed_indices).argmax()
    next_char = index_to_char[next_index]
    generated_text += next_char
    seed_text = seed_text[1:] + next_char

print(generated_text)
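Greedy argmax decoding like the loop above tends to lock onto a repeating pattern of characters, which is visible in the generated text in the stdout below. A minimal sketch of an alternative, assuming the model, char_to_index, index_to_char, seq_length and num_chars_to_generate defined above: sample the next index from the softmax output with a temperature parameter instead of always taking the argmax. The sample_with_temperature helper and the temperature value 0.8 are illustrative choices, not part of the original script.

def sample_with_temperature(probs, temperature=0.8):
    # Rescale the predicted distribution and draw an index from it
    probs = np.asarray(probs, dtype=np.float64)
    logits = np.log(probs + 1e-9) / temperature
    exp_logits = np.exp(logits - logits.max())
    probs = exp_logits / exp_logits.sum()
    return np.random.choice(len(probs), p=probs)

seed_text = "This is a sample te"
generated_text = seed_text
for _ in range(num_chars_to_generate):
    seed_indices = [char_to_index[char] for char in seed_text]
    if len(seed_indices) < seq_length:
        seed_indices = [0] * (seq_length - len(seed_indices)) + seed_indices
    probs = model.predict(np.array(seed_indices).reshape(1, -1))[0]
    next_index = sample_with_temperature(probs, temperature=0.8)
    next_char = index_to_char[next_index]
    generated_text += next_char
    seed_text = seed_text[1:] + next_char
print(generated_text)

Lower temperatures behave more like argmax; higher temperatures produce more varied (and noisier) output.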
Success 2.62s 240060KB
stdin
Standard input is empty
stdout
Epoch 1/50
34/34 [==============================] - 1s 18ms/sample - loss: 3.0547
Epoch 2/50
34/34 [==============================] - 0s 314us/sample - loss: 2.9641
Epoch 3/50
34/34 [==============================] - 0s 307us/sample - loss: 2.8737
Epoch 4/50
34/34 [==============================] - 0s 325us/sample - loss: 2.7799
Epoch 5/50
34/34 [==============================] - 0s 309us/sample - loss: 2.6815
Epoch 6/50
34/34 [==============================] - 0s 297us/sample - loss: 2.5791
Epoch 7/50
34/34 [==============================] - 0s 270us/sample - loss: 2.4725
Epoch 8/50
34/34 [==============================] - 0s 180us/sample - loss: 2.3598
Epoch 9/50
34/34 [==============================] - 0s 181us/sample - loss: 2.2427
Epoch 10/50
34/34 [==============================] - 0s 193us/sample - loss: 2.1274
Epoch 11/50
34/34 [==============================] - 0s 291us/sample - loss: 2.0177
Epoch 12/50
34/34 [==============================] - 0s 241us/sample - loss: 1.9132
Epoch 13/50
34/34 [==============================] - 0s 199us/sample - loss: 1.8132
Epoch 14/50
34/34 [==============================] - 0s 311us/sample - loss: 1.7179
Epoch 15/50
34/34 [==============================] - 0s 281us/sample - loss: 1.6260
Epoch 16/50
34/34 [==============================] - 0s 175us/sample - loss: 1.5356
Epoch 17/50
34/34 [==============================] - 0s 176us/sample - loss: 1.4454
Epoch 18/50
34/34 [==============================] - 0s 178us/sample - loss: 1.3563
Epoch 19/50
34/34 [==============================] - 0s 175us/sample - loss: 1.2699
Epoch 20/50
34/34 [==============================] - 0s 178us/sample - loss: 1.1876
Epoch 21/50
34/34 [==============================] - 0s 187us/sample - loss: 1.1090
Epoch 22/50
34/34 [==============================] - 0s 190us/sample - loss: 1.0337
Epoch 23/50
34/34 [==============================] - 0s 187us/sample - loss: 0.9622
Epoch 24/50
34/34 [==============================] - 0s 178us/sample - loss: 0.8951
Epoch 25/50
34/34 [==============================] - 0s 179us/sample - loss: 0.8318
Epoch 26/50
34/34 [==============================] - 0s 182us/sample - loss: 0.7718
Epoch 27/50
34/34 [==============================] - 0s 182us/sample - loss: 0.7147
Epoch 28/50
34/34 [==============================] - 0s 182us/sample - loss: 0.6607
Epoch 29/50
34/34 [==============================] - 0s 178us/sample - loss: 0.6099
Epoch 30/50
34/34 [==============================] - 0s 175us/sample - loss: 0.5620
Epoch 31/50
34/34 [==============================] - 0s 189us/sample - loss: 0.5167
Epoch 32/50
34/34 [==============================] - 0s 306us/sample - loss: 0.4746
Epoch 33/50
34/34 [==============================] - 0s 315us/sample - loss: 0.4357
Epoch 34/50
34/34 [==============================] - 0s 337us/sample - loss: 0.3998
Epoch 35/50
34/34 [==============================] - 0s 318us/sample - loss: 0.3666
Epoch 36/50
34/34 [==============================] - 0s 313us/sample - loss: 0.3363
Epoch 37/50
34/34 [==============================] - 0s 322us/sample - loss: 0.3086
Epoch 38/50
34/34 [==============================] - 0s 323us/sample - loss: 0.2831
Epoch 39/50
34/34 [==============================] - 0s 303us/sample - loss: 0.2597
Epoch 40/50
34/34 [==============================] - 0s 298us/sample - loss: 0.2386
Epoch 41/50
34/34 [==============================] - 0s 300us/sample - loss: 0.2194
Epoch 42/50
34/34 [==============================] - 0s 241us/sample - loss: 0.2018
Epoch 43/50
34/34 [==============================] - 0s 170us/sample - loss: 0.1857
Epoch 44/50
34/34 [==============================] - 0s 167us/sample - loss: 0.1711
Epoch 45/50
34/34 [==============================] - 0s 168us/sample - loss: 0.1580
Epoch 46/50
34/34 [==============================] - 0s 174us/sample - loss: 0.1461
Epoch 47/50
34/34 [==============================] - 0s 2ms/sample - loss: 0.1354
Epoch 48/50
34/34 [==============================] - 0s 183us/sample - loss: 0.1256
Epoch 49/50
34/34 [==============================] - 0s 181us/sample - loss: 0.1169
Epoch 50/50
34/34 [==============================] - 0s 177us/sample - loss: 0.1089
This is a sample teorrrnrraangiagodmongggang sang aggNngnggsangnsangaagornanggsanonsangiaggoranggsangnsang asrgganggsan
stderr
WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.