A simple model written with TensorFlow 1.13. Version 1.13 is outdated, but the assignment still requires it.

Environment: Python 3.6, TensorFlow 1.13

Build the graph (chain the layers) → pick an optimizer and a loss → sess.run([optimizer, loss_func])
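A minimal sketch of that three-step workflow on a toy one-variable problem (hypothetical example, not part of the assignment):

import tensorflow as tf

w = tf.Variable(0.0)                                  # 1. build the graph
loss = tf.square(w - 3.0)                             # 2. pick a loss
opt = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(50):
        _, loss_val = sess.run([opt, loss])           # 3. run optimizer and loss together
    print(loss_val)                                   # close to 0 after training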

import os
import numpy as np
import tensorflow as tf
data_dim = 100
label_pos = 1
label_neg = 0

def read_dataset(src, name):
    """Read one split of the rt-polarity data into (vectors, labels)."""
    with open(os.path.join(src, f"rt-polarity.{name}.vecs"), "r") as src_file:
        n_lines = sum(1 for line in src_file)
        data = np.empty((n_lines, data_dim), dtype=np.float32)
        labels = np.empty((n_lines, 1), dtype=np.float32)

        src_file.seek(0)
        for i, line in enumerate(src_file):
            # Each line: <ignored> \t label=POS|NEG \t <space-separated floats>
            _, str_label, str_vec = line.split('\t')
            labels[i] = label_pos if str_label.split('=')[1] == "POS" else label_neg
            data[i] = [float(f) for f in str_vec.split()[:data_dim]]
    return data, labels
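Given the split logic above, each line in the .vecs files is assumed to be tab-separated: an ignored first field, a label field like label=POS, then the space-separated vector. A hypothetical line and how it parses:

line = "7\tlabel=POS\t0.12 -0.30 0.07"    # hypothetical; real vectors have data_dim = 100 floats
_, str_label, str_vec = line.split('\t')
print(str_label.split('=')[1])            # POS
print(str_vec.split())                    # ['0.12', '-0.30', '0.07']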

train_x, train_y = read_dataset("DATA", "train")
dev_x, dev_y = read_dataset("DATA", "dev")
test_x, test_y = read_dataset("DATA", "test")

def get_random_batches(X, y, batch_size):
    """Shuffle X and y together, then split them into roughly batch_size-sized chunks."""
    perm = np.random.permutation(len(y))
    X, y = X[perm], y[perm]
    n_batches = len(y) // batch_size
    return np.array_split(X, n_batches), np.array_split(y, n_batches)
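A quick sanity check of the batching helper on zero-filled dummy arrays (hypothetical sizes). Note that np.array_split balances the remainder across batches rather than producing one short batch:

dummy_X = np.zeros((25, data_dim), dtype=np.float32)
dummy_y = np.zeros((25, 1), dtype=np.float32)
bx, by = get_random_batches(dummy_X, dummy_y, batch_size=10)
print([b.shape[0] for b in bx])   # [13, 12]: 25 rows split into 25//10 = 2 batches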

# Layer
def perceptron(input_x, input_size, output_size, activation_func=None):
    """One fully connected layer: z = x·W + b, with an optional activation."""
    print(f"layer: ({input_size:4d}, {output_size:4d}) {activation_func}")
    with tf.variable_scope("Perceptron"):
        W = tf.Variable(tf.random_normal([input_size, output_size]))
        b = tf.Variable(tf.random_normal([output_size]))
        z = tf.matmul(input_x, W) + b

    if activation_func == "relu":
        return tf.nn.relu(z)
    elif activation_func == "sigmoid":
        return tf.sigmoid(z)
    else:
        return z

# Hyperparameter
batch_size = 10
learning_rate = 0.01
epochs = 20
h1_size, h2_size = 50, 50
show_result_per_epochs = 5

print(f"Epochs: {epochs}\tBatch size: {batch_size}\t\tLearning rate: {learning_rate}")
print("Loss function: Mean squared error\nOptimizer: Adam\n2-norm regularization: no")

# Model
print("\\nModel: ")
X = tf.placeholder(tf.float32, shape=(None, 100))
Y = tf.placeholder(tf.float32, shape=(None, 1))

model = perceptron(X, 100, h1_size, "sigmoid")
model = perceptron(model, h1_size, h2_size, "sigmoid")
y_pred = perceptron(model, h2_size, 1, "sigmoid")

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
loss_func = tf.losses.mean_squared_error(Y, y_pred)
opt = optimizer.minimize(loss_func)
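MSE works here, but cross entropy is the more standard loss for binary classification. A sketch of swapping it in, assuming the last layer is rebuilt without the sigmoid so the loss sees raw logits (this replaces y_pred, loss_func, and the minimize call above, it is not used alongside them):

logits = perceptron(model, h2_size, 1)    # no activation: raw logits
loss_func = tf.losses.sigmoid_cross_entropy(multi_class_labels=Y, logits=logits)
opt = optimizer.minimize(loss_func)       # re-minimize on the new loss
y_pred = tf.sigmoid(logits)               # probabilities for the accuracy node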

# Predict: round the sigmoid output to 0 or 1 and compare with the gold label
correct_prediction = tf.equal(tf.round(y_pred), Y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Train
with tf.Session() as sess:
    print("\nTrain:")
    sess.run(tf.global_variables_initializer())

    for i in range(1, epochs + 1):
        # Reshuffle the training data into fresh batches each epoch
        batches_x, batches_y = get_random_batches(train_x, train_y, batch_size)
        for batch_x, batch_y in zip(batches_x, batches_y):
            sess.run([opt, loss_func], feed_dict={X: batch_x, Y: batch_y})
        if i % show_result_per_epochs == 0:
            # History
            train_ac = accuracy.eval({X: train_x, Y: train_y})
            train_loss = loss_func.eval({X: train_x, Y: train_y})
            print(f"Epoch:{i:3d}\tTrain acc: {train_ac:.5f}\tLoss: {train_loss:.5f}")

    # Evaluate
    dev_ac = accuracy.eval({X: dev_x, Y: dev_y})
    dev_loss = loss_func.eval({X: dev_x, Y: dev_y})
    test_ac = accuracy.eval({X: test_x, Y: test_y})
    test_loss = loss_func.eval({X: test_x, Y: test_y})
    print("\\nEvaluate: ")
    print(f"Dev acc: {dev_ac:.5f}\\tDev loss: {dev_loss:10.5f}")
    print(f"Test acc: {test_ac:.5f}\\tTest loss: {test_loss:10.5f}")

Note: tf.placeholder, tf.Session, and tf.global_variables_initializer are TF 1.x APIs that were removed in TF 2.x, so this script only runs on a 1.x install (or via tf.compat.v1).