import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense

# Sample text data
text = "This is a sample text for language modeling using RNN."

# Build the character vocabulary and lookup tables
chars = sorted(set(text))
char_to_index = {char: index for index, char in enumerate(chars)}
index_to_char = {index: char for index, char in enumerate(chars)}

# Convert the text to a sequence of character indices
text_indices = [char_to_index[char] for char in text]

# Create input-output pairs for training: each input is a window of
# seq_length indices, and the target is the character that follows it
seq_length = 20
sequences = []
next_chars = []
for i in range(0, len(text_indices) - seq_length):
    sequences.append(text_indices[i : i + seq_length])
    next_chars.append(text_indices[i + seq_length])

# Convert sequences and next_chars to numpy arrays
X = np.array(sequences)
y = np.array(next_chars)

# Build the RNN model
model = Sequential([
    Embedding(input_dim=len(chars), output_dim=50, input_length=seq_length),
    SimpleRNN(100, return_sequences=False),
    Dense(len(chars), activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")

# Train the model
model.fit(X, y, batch_size=64, epochs=50)

# Generate text using the trained model
seed_text = "This is a sample te"
generated_text = seed_text
num_chars_to_generate = 100

for _ in range(num_chars_to_generate):
    seed_indices = [char_to_index[char] for char in seed_text]

    # Left-pad the seed if it is shorter than the model's input length
    if len(seed_indices) < seq_length:
        diff = seq_length - len(seed_indices)
        seed_indices = [0] * diff + seed_indices

    seed_indices = np.array(seed_indices).reshape(1, -1)
    next_index = model.predict(seed_indices).argmax()
    next_char = index_to_char[next_index]
    generated_text += next_char
    seed_text = seed_text[1:] + next_char

print(generated_text)
Success (3.25s, 239320 KB)
stdout
Epoch 1/50

34/34 [==============================] - 1s 26ms/sample - loss: 3.1168
Epoch 2/50

34/34 [==============================] - 0s 245us/sample - loss: 3.0151
Epoch 3/50

34/34 [==============================] - 0s 236us/sample - loss: 2.9173
Epoch 4/50

34/34 [==============================] - 0s 237us/sample - loss: 2.8181
Epoch 5/50

34/34 [==============================] - 0s 225us/sample - loss: 2.7166
Epoch 6/50

34/34 [==============================] - 0s 222us/sample - loss: 2.6148
Epoch 7/50

34/34 [==============================] - 0s 255us/sample - loss: 2.5132
Epoch 8/50

34/34 [==============================] - 0s 368us/sample - loss: 2.4056
Epoch 9/50

34/34 [==============================] - 0s 377us/sample - loss: 2.2887
Epoch 10/50

34/34 [==============================] - 0s 370us/sample - loss: 2.1725
Epoch 11/50

34/34 [==============================] - 0s 390us/sample - loss: 2.0693
Epoch 12/50

34/34 [==============================] - 0s 391us/sample - loss: 1.9791
Epoch 13/50

34/34 [==============================] - 0s 378us/sample - loss: 1.8919
Epoch 14/50

34/34 [==============================] - 0s 387us/sample - loss: 1.8005
Epoch 15/50

34/34 [==============================] - 0s 398us/sample - loss: 1.7045
Epoch 16/50

34/34 [==============================] - 0s 376us/sample - loss: 1.6080
Epoch 17/50

34/34 [==============================] - 0s 371us/sample - loss: 1.5150
Epoch 18/50

34/34 [==============================] - 0s 388us/sample - loss: 1.4253
Epoch 19/50

34/34 [==============================] - 0s 308us/sample - loss: 1.3377
Epoch 20/50

34/34 [==============================] - 0s 236us/sample - loss: 1.2534
Epoch 21/50

34/34 [==============================] - 0s 271us/sample - loss: 1.1744
Epoch 22/50

34/34 [==============================] - 0s 366us/sample - loss: 1.1019
Epoch 23/50

34/34 [==============================] - 0s 261us/sample - loss: 1.0335
Epoch 24/50

34/34 [==============================] - 0s 393us/sample - loss: 0.9659
Epoch 25/50

34/34 [==============================] - 0s 340us/sample - loss: 0.8989
Epoch 26/50

34/34 [==============================] - 0s 241us/sample - loss: 0.8339
Epoch 27/50

34/34 [==============================] - 0s 240us/sample - loss: 0.7720
Epoch 28/50

34/34 [==============================] - 0s 224us/sample - loss: 0.7140
Epoch 29/50

34/34 [==============================] - 0s 223us/sample - loss: 0.6600
Epoch 30/50

34/34 [==============================] - 0s 224us/sample - loss: 0.6099
Epoch 31/50

34/34 [==============================] - 0s 225us/sample - loss: 0.5642
Epoch 32/50

34/34 [==============================] - 0s 233us/sample - loss: 0.5227
Epoch 33/50

34/34 [==============================] - 0s 242us/sample - loss: 0.4836
Epoch 34/50

34/34 [==============================] - 0s 231us/sample - loss: 0.4461
Epoch 35/50

34/34 [==============================] - 0s 223us/sample - loss: 0.4106
Epoch 36/50

34/34 [==============================] - 0s 225us/sample - loss: 0.3778
Epoch 37/50

34/34 [==============================] - 0s 223us/sample - loss: 0.3479
Epoch 38/50

34/34 [==============================] - 0s 332us/sample - loss: 0.3207
Epoch 39/50

34/34 [==============================] - 0s 377us/sample - loss: 0.2960
Epoch 40/50

34/34 [==============================] - 0s 373us/sample - loss: 0.2732
Epoch 41/50

34/34 [==============================] - 0s 370us/sample - loss: 0.2520
Epoch 42/50

34/34 [==============================] - 0s 387us/sample - loss: 0.2326
Epoch 43/50

34/34 [==============================] - 0s 395us/sample - loss: 0.2151
Epoch 44/50

34/34 [==============================] - 0s 394us/sample - loss: 0.1994
Epoch 45/50

34/34 [==============================] - 0s 392us/sample - loss: 0.1852
Epoch 46/50

34/34 [==============================] - 0s 376us/sample - loss: 0.1721
Epoch 47/50

34/34 [==============================] - 0s 2ms/sample - loss: 0.1601
Epoch 48/50

34/34 [==============================] - 0s 359us/sample - loss: 0.1490
Epoch 49/50

34/34 [==============================] - 0s 287us/sample - loss: 0.1390
Epoch 50/50

34/34 [==============================] - 0s 288us/sample - loss: 0.1299
This is a sample teNt aor aang asNaaoanlinggaaangoangangansNnssRgaaRNansansargang nsiaagangangansansanggaasansansangang
stderr
WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
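
Note: the generated string in stdout repeats a handful of characters because the loop always takes the argmax of the softmax output (greedy decoding) and the model was trained on a single short sentence. A common variant is to sample the next character from the predicted distribution with a temperature parameter. The sketch below is illustrative and was not part of the run above; it assumes the same np, model, and seed_indices from the listing, and sample_next_index plus the temperature value are made up for the example.

import numpy as np

def sample_next_index(probs, temperature=0.8):
    # Rescale the softmax output by the temperature and renormalize,
    # then draw a character index from the resulting distribution.
    probs = np.asarray(probs, dtype="float64")
    logits = np.log(probs + 1e-9) / temperature
    probs = np.exp(logits) / np.sum(np.exp(logits))
    return int(np.random.choice(len(probs), p=probs))

# Inside the generation loop, the greedy line
#     next_index = model.predict(seed_indices).argmax()
# would become
#     probs = model.predict(seed_indices)[0]
#     next_index = sample_next_index(probs, temperature=0.8)
# Lower temperatures stay close to argmax; higher ones give more varied output.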