TensorFlow CNN get error - DataType uint8 not in list
#1
The experiment is carried out on Windows 10 Pro with an Intel® Core™ i5-4590 CPU @ 3.3 GHz, on the Anaconda platform with Spyder (Python 3.7.150); it is programmed in Python using Python library functions.

I get the following error message:

Error:
  File "C:\Users\HSIPL\Anaconda3\lib\site-packages\tensorflow_core\python\framework\op_def_library.py", line 60, in _SatisfiesTypeConstraint
    ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: Value passed to parameter 'input' has DataType uint8 not in list of allowed values: float16, bfloat16, float32, float64
Please help me to solve this problem.
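In short, the traceback means that model.predict received a uint8 NumPy array, while Conv2D layers only accept floating-point input; calling np.expand_dims directly on the PIL image returned by load_img (as in the prediction code below) produces exactly such a uint8 array. A minimal sketch of the conversion, using the img_to_array helper already imported in the script (train_image_path and model refer to the variables defined below, and the /255 matches the rescale = 1./255 applied by the training generators):

import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array, load_img

img = load_img(train_image_path, target_size = (150, 150))
x = img_to_array(img) / 255.0      # float32 in [0, 1] instead of uint8
x = np.expand_dims(x, 0)           # add batch dimension -> shape (1, 150, 150, 3)
pred = model.predict(x)            # no dtype error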

# Importing Libraries
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os

# Preparing Dataset
# Setting names of the directories for both sets
base_dir = 'data'
seta = 'Man_One'
setb = 'Man_Two'

# Each of the sets has three sub directories train, validation and test
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

def prepare_data(base_dir, seta, setb):
    # Takes the directory names for the base directory and both sets
    # Returns the train and validation paths for each set
    seta_train_dir = os.path.join(train_dir, seta)
    setb_train_dir = os.path.join(train_dir, setb)

    seta_valid_dir = os.path.join(validation_dir, seta)
    setb_valid_dir = os.path.join(validation_dir, setb)

    seta_train_fnames = os.listdir(seta_train_dir)
    setb_train_fnames = os.listdir(setb_train_dir)

    return seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames

seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames = prepare_data(base_dir, seta, setb)

seta_test_dir = os.path.join(test_dir, seta)
setb_test_dir = os.path.join(test_dir, setb)
test_fnames_seta = os.listdir(seta_test_dir)
test_fnames_setb = os.listdir(setb_test_dir)

datagen = ImageDataGenerator( 
          height_shift_range = 0.2,
          width_shift_range = 0.2,
          rotation_range = 40,
          shear_range = 0.2,
          zoom_range = 0.2,
          horizontal_flip = True,
          fill_mode = 'nearest')

img_path = os.path.join(seta_train_dir, seta_train_fnames[3])
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)

i = 0
for batch in datagen.flow(x, batch_size = 1):
    plt.figure(i)
    imgplot = plt.imshow(array_to_img(batch[0]))
    i += 1
    if i % 5 == 0:
        break
        
# Convolutional Neural Network Model
# Import TensorFlow Libraries
from tensorflow.keras import layers
from tensorflow.keras import Model

img_input = layers.Input(shape = (150, 150, 3))        

# 2D Convolution Layer with 32 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(32, 3, activation = 'relu')(img_input)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 64 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(64, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 128 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(128, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 256 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(256, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 256 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(256, 3, activation = 'relu')(x)
# Flatten Layer
x = layers.Flatten()(x)

# Fully Connected Layers and ReLU activation algorithm
x = layers.Dense(128, activation = 'relu')(x)
x = layers.Dense(128, activation = 'relu')(x)
x = layers.Dense(100, activation = 'relu')(x)

# Dropout Layers for optimisation
x = layers.Dropout(0.5)(x)

# Fully Connected Layers and sigmoid activation algorithm
output = layers.Dense(1, activation = 'sigmoid')(x)

model = Model(img_input, output)

model.summary()

import tensorflow as tf
# Using binary_crossentropy as the loss function and
# Adam Optimizer as the optimizing function when training
model.compile(loss = 'binary_crossentropy',
              optimizer = tf.optimizers.Adam(learning_rate = 0.0005),
              metrics = ['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator            

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)

# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
                  train_dir,
                  target_size = (150, 150),
                  batch_size = 20,
                  class_mode = 'binary')

validation_generator = test_datagen.flow_from_directory(
                       validation_dir,
                       target_size = (150, 150),
                       batch_size = 20,
                       class_mode = 'binary')

import matplotlib.image as mpimg

# 5x5 grid
ncols = 5
nrows = 5

pic_index = 0

# Set up matplotlib fig and size it to fit 5x5 pics
fig = plt.gcf()
fig.set_size_inches(ncols * 5, nrows * 5)

pic_index += 10
next_seta_pix = [os.path.join(seta_train_dir, fname)
                 for fname in seta_train_fnames[pic_index-10:pic_index]]
next_setb_pix = [os.path.join(setb_train_dir, fname)
                 for fname in setb_train_fnames[pic_index-10:pic_index]]

for i, img_path in enumerate(next_seta_pix + next_setb_pix):
    # Set up subplot; subplot indices start at 1
    sp = plt.subplot(nrows, ncols, i + 1)
    sp.axis('Off')

    img = mpimg.imread(img_path)
    plt.imshow(img)
    
plt.show()

# Train the model
mymodel = model.fit_generator(
          train_generator,
          steps_per_epoch = 10,
          epochs = 80,
          validation_data = validation_generator,
          validation_steps = 7,
          verbose = 2)

import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img

successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)

a_img_files = [os.path.join(seta_train_dir, f) for f in seta_train_fnames]
b_img_files = [os.path.join(setb_train_dir, f) for f in setb_train_fnames]
img_path = random.choice(a_img_files + b_img_files)

img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)

x /= 255

successive_feature_maps = visualization_model.predict(x)

layer_names = [layer.name for layer in model.layers]

for layer_name, feature_map in zip(layer_names, successive_feature_maps):
    if len(feature_map.shape) == 4:
        # Just do this for the conv/maxpool layers
        n_features = feature_map.shape[-1]
        # The feature map has shape (1, size, size, n_features)
        size = feature_map.shape[1]
        # Will tile images in this matrix
        display_grid = np.zeros((size, size * n_features))
        for i in range(n_features):
            # Postprocess the feature
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x /= x.std()
            x *= 8
            x += 64
            x = np.clip(x, 0, 255).astype('uint8')
            # Will tile each filter into this big horizontal grid
            display_grid[:, i * size : (i + 1) * size] = x
            
# Accuracy results for each training and validation epoch
acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']

# Loss Results for each training and validation epoch
loss = mymodel.history['loss']
val_loss = mymodel.history['val_loss']

epochs = range(len(acc))

# Plot accuracy for each training and validation epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')

plt.figure()

# Plot loss for each training and validation epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')

# Testing model on a random train image from set a
train_img = random.choice(seta_train_fnames)
train_image_path = os.path.join(seta_train_dir, train_img)
train_img = load_img(train_image_path, target_size =(150, 150))
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
print(train_img.shape)

model.predict(train_img)

# Testing model on a random train image from set b

train_img = random.choice(setb_train_fnames)
train_image_path = os.path.join(setb_train_dir, train_img)
train_img = load_img(train_image_path, target_size =(150, 150))
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
print(train_img.shape)

model.predict(train_img)

# Testing a random image from the test set a 

cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_seta:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(seta_test_dir, fname)
    load_file = load_img(file_path, target_size = (150, 150))
    load_file = (np.expand_dims(load_file, 0))
    pred_img = model.predict(load_file)
    if(pred_img[0]<0.5):
        cal_mo+=1
    elif(pred_img[0]>0.5):
        cal_mt+=1
    else:
        print(pred_img[0], "\n")
        cal_unconclusive+=1
        alist.append(file_path)
print(alist)

print("Identified as: \n")
print("Man One:", cal_mo)
print("Man Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100)
a =  (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100
   
# Testing a random image from the test set b

cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_setb:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(setb_test_dir, fname)
    load_file = load_img(file_path, target_size = (150, 150))
    load_file = (np.expand_dims(load_file, 0))
    pred_img = model.predict(load_file)
    if(pred_img[0]<0.5):
        cal_mo+=1
    elif(pred_img[0]>0.5):
        cal_mt+=1
    else:
        print(pred_img[0], "\n")
        cal_unconclusive+=1
        alist.append(file_path)
print(alist)

print("Identified as: \n")
print("Man One:", cal_mo)
print("Man Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100)
b =  (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100             

avg = (a+b)/2
print("Average Percentage:", avg)
This is an urgent case for me, so please help.
#2
def load_imgtest(path_to_img):
    # max_dim = 150
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)

    # shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    # print(shape)

    # long_dim = max(shape)
    # print(long_dim)
    # scale = max_dim / long_dim
    # print(scale)

    # new_shape = tf.cast(shape * scale, tf.int32)
    # print(new_shape)

    img = tf.image.resize(img, [150, 150])  # img = tf.image.resize(img, new_shape)
    print(img)
    img = img[tf.newaxis, :]
    return img
train_img = load_imgtest(train_image_path)
print(train_img.shape)


This function should help you. :)) I am also running this code from a YouTube tutorial.
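The key line is tf.image.convert_image_dtype(img, tf.float32): it converts the decoded uint8 pixels to float32 values in [0, 1], which matches the rescale = 1./255 used by the training generators, so the Conv2D layers get the dtype they expect. As a sketch of where it plugs in (variable names taken from the original post), each load_img / np.expand_dims pair before a model.predict call becomes a single load_imgtest call:

for fname in test_fnames_seta:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(seta_test_dir, fname)
    # load_imgtest returns a float32 batch of shape (1, 150, 150, 3)
    load_file = load_imgtest(file_path)
    pred_img = model.predict(load_file)

The same substitution works for the set b loop and for the single-image checks on the training images.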
#3



Thank you, but may I know where to insert your code in the whole program?
#4
(Nov-04-2019, 12:47 PM)vokoyo Wrote: Thank you, but may I know where to insert your code in the whole program?
# Importing Libraries
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os

# Preparing Dataset
# Setting names of the directories for both sets
base_dir = 'twoclass'
seta = 'loc'
setb = 'luan'

# Each of the sets has three sub directories train, validation and test
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

def prepare_data(base_dir, seta, setb):
    # Takes the directory names for the base directory and both sets
    # Returns the train and validation paths for each set
    seta_train_dir = os.path.join(train_dir, seta)
    setb_train_dir = os.path.join(train_dir, setb)

    seta_valid_dir = os.path.join(validation_dir, seta)
    setb_valid_dir = os.path.join(validation_dir, setb)

    seta_train_fnames = os.listdir(seta_train_dir)
    setb_train_fnames = os.listdir(setb_train_dir)

    return seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames

seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames = prepare_data(base_dir, seta, setb)

seta_test_dir = os.path.join(test_dir, seta)
setb_test_dir = os.path.join(test_dir, setb)
test_fnames_seta = os.listdir(seta_test_dir)
test_fnames_setb = os.listdir(setb_test_dir)

datagen = ImageDataGenerator(
          height_shift_range = 0.2,
          width_shift_range = 0.2,
          rotation_range = 40,
          shear_range = 0.2,
          zoom_range = 0.2,
          horizontal_flip = True,
          fill_mode = 'nearest')

img_path = os.path.join(seta_train_dir, seta_train_fnames[0])
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)

i = 0
for batch in datagen.flow(x, batch_size = 1):
    plt.figure(i)
    imgplot = plt.imshow(array_to_img(batch[0]))
    i += 1
    if i % 5 == 0:
        break

# Convolutional Neural Network Model
# Import TensorFlow Libraries
from tensorflow.keras import layers
from tensorflow.keras import Model

img_input = layers.Input(shape = (150, 150, 3))

# 2D Convolution Layer with 32 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(32, 3, activation = 'relu')(img_input)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 64 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(64, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 128 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(128, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 256 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(256, 3, activation = 'relu')(x)
# 2D Max Pooling Layer
x = layers.MaxPooling2D(2)(x)

# 2D Convolution Layer with 256 filters of dimension 3x3 and ReLU activation
x = layers.Conv2D(256, 3, activation = 'relu')(x)
# Flatten Layer
x = layers.Flatten()(x)

# Fully Connected Layers and ReLU activation algorithm
x = layers.Dense(128, activation = 'relu')(x)
x = layers.Dense(128, activation = 'relu')(x)
x = layers.Dense(100, activation = 'relu')(x)

# Dropout Layers for optimisation
x = layers.Dropout(0.5)(x)

# Fully Connected Layers and sigmoid activation algorithm
output = layers.Dense(1, activation = 'sigmoid')(x)

model = Model(img_input, output)

model.summary()

import tensorflow as tf
# Using binary_crossentropy as the loss function and
# Adam Optimizer as the optimizing function when training
model.compile(loss = 'binary_crossentropy',
              optimizer = tf.optimizers.Adam(learning_rate = 0.0005),
              metrics = ['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)

# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
                  train_dir,
                  target_size = (150, 150),
                  batch_size = 20,
                  class_mode = 'binary')

validation_generator = test_datagen.flow_from_directory(
                       validation_dir,
                       target_size = (150, 150),
                       batch_size = 20,
                       class_mode = 'binary')

import matplotlib.image as mpimg

# 5x5 grid
ncols = 5
nrows = 5

pic_index = 0

# Set up matplotlib fig and size it to fit 5x5 pics
fig = plt.gcf()
fig.set_size_inches(ncols * 5, nrows * 5)

pic_index += 10
next_seta_pix = [os.path.join(seta_train_dir, fname)
                 for fname in seta_train_fnames[pic_index-10:pic_index]]
next_setb_pix = [os.path.join(setb_train_dir, fname)
                 for fname in setb_train_fnames[pic_index-10:pic_index]]

for i, img_path in enumerate(next_seta_pix + next_setb_pix):
    # Set up subplot; subplot indices start at 1
    sp = plt.subplot(nrows, ncols, i + 1)
    sp.axis('Off')

    img = mpimg.imread(img_path)
    plt.imshow(img)

# plt.show()

# Train the model
mymodel = model.fit_generator(
          train_generator,
          steps_per_epoch = 10,
          epochs = 80,
          validation_data = validation_generator,
          validation_steps = 7,
          verbose = 2)

import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img

successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)

a_img_files = [os.path.join(seta_train_dir, f) for f in seta_train_fnames]
b_img_files = [os.path.join(setb_train_dir, f) for f in setb_train_fnames]
img_path = random.choice(a_img_files + b_img_files)

img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)

x /= 255

successive_feature_maps = visualization_model.predict(x)

layer_names = [layer.name for layer in model.layers]

for layer_name, feature_map in zip(layer_names, successive_feature_maps):
    if len(feature_map.shape) == 4:
        # Just do this for the conv/maxpool layers
        n_features = feature_map.shape[-1]
        # The feature map has shape (1, size, size, n_features)
        size = feature_map.shape[1]
        # Will tile images in this matrix
        display_grid = np.zeros((size, size * n_features))
        for i in range(n_features):
            # Postprocess the feature
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x /= x.std()
            x *= 8
            x += 64
            # x = np.clip(x, 0, 255).astype('uint8')
            x = np.clip(x, 0, 255).astype('float32')
            # Will tile each filter into this big horizontal grid
            display_grid[:, i * size : (i + 1) * size] = x

# Accuracy results for each training and validation epoch
acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']

# Loss Results for each training and validation epoch
loss = mymodel.history['loss']
val_loss = mymodel.history['val_loss']

epochs = range(len(acc))

# Plot accuracy for each training and validation epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')

plt.figure()

# Plot loss for each training and validation epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')

# Testing model on a random train image from set a
train_img = random.choice(seta_train_fnames)
train_image_path = os.path.join(seta_train_dir, train_img)
# train_img = load_img(train_image_path, target_size = (150, 150))
# train_img = tf.image.decode_image(train_img, channels=3)
# train_img = tf.image.convert_image_dtype(train_img, tf.float32)
# shape = tf.cast(tf.shape(train_img)[:-1], tf.float32)
# plt.imshow(train_img)
# train_img = (np.expand_dims(train_img, 0))

def load_imgtest(path_to_img):
    # max_dim = 150
    img = tf.io.read_file(path_to_img)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)

    # shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    # print(shape)

    # long_dim = max(shape)
    # print(long_dim)
    # scale = max_dim / long_dim
    # print(scale)

    # new_shape = tf.cast(shape * scale, tf.int32)
    # print(new_shape)

    img = tf.image.resize(img, [150, 150])  # img = tf.image.resize(img, new_shape)
    img = img[tf.newaxis, :]
    return img
train_img = load_imgtest(train_image_path)
print(train_img.shape)

model.predict(train_img)

# Testing model on a random train image from set b

# train_img = random.choice(setb_train_fnames)
# train_image_path = os.path.join(setb_train_dir, train_img)
# train_img = load_img(train_image_path, target_size = (150, 150))
# plt.imshow(train_img)
# train_img = (np.expand_dims(train_img, 0))
# print(train_img.shape)

# model.predict(train_img)

# Testing a random image from the test set a

cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_seta:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(seta_test_dir, fname)
    # load_file = load_img(file_path, target_size = (150, 150))
    # load_file = (np.expand_dims(load_file, 0))
    load_file = load_imgtest(file_path)
    pred_img = model.predict(load_file)
    print(file_path)
    print(pred_img, "\n")
    if(pred_img[0]<0.5):
        cal_mo+=1
    elif(pred_img[0]>0.5):
        cal_mt+=1
    else:
        print(pred_img[0], "\n")
        cal_unconclusive+=1
        alist.append(file_path)
print(alist)

print("Identified as: \n")
print("Man One:", cal_mo)
print("Man Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100)
a = (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100

# Testing a random image from the test set b

# cal_mo = 0
# cal_mt = 0
# cal_unconclusive = 0
# alist = []
# for fname in test_fnames_setb:
#     if fname.startswith('.'):
#         continue
#     file_path = os.path.join(setb_test_dir, fname)
#     load_file = load_img(file_path, target_size = (150, 150))
#     load_file = (np.expand_dims(load_file, 0))
#     pred_img = model.predict(load_file)
#     if(pred_img[0]<0.5):
#         cal_mo+=1
#     elif(pred_img[0]>0.5):
#         cal_mt+=1
#     else:
#         print(pred_img[0], "\n")
#         cal_unconclusive+=1
#         alist.append(file_path)
# print(alist)

# print("Identified as: \n")
# print("Man One:", cal_mo)
# print("Man Two:", cal_mt)
# print("Inconclusive:", cal_unconclusive)
# print("Percentage:", (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100)
# b = (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100

# avg = (a+b)/2
# print("Average Percentage:", avg)