Saturday, 6 April 2024

Save tokenizer, model, and load

 Hi all

-----------------------------------------

from google.colab import drive
import os

# Mount Google Drive so artifacts persist across Colab sessions.
drive.mount('/content/drive')

# The original line was `cd /content/drive/MyDrive/HUPHealth`, which is
# IPython shell magic and a SyntaxError in a plain .py file; os.chdir is
# the portable Python equivalent.
os.chdir('/content/drive/MyDrive/HUPHealth')

import joblib

file_path = "model.joblib"

# Serialize the trained model to disk.
# NOTE(review): `model` is not defined in this snippet — assumed to exist
# from an earlier notebook cell.
joblib.dump(model, file_path)

-------------------------------------------

from tensorflow.keras.models import load_model

# Restore the pre-trained emotion network from its HDF5 checkpoint.
emotion_model_path = "HUPYogadeep.h5"
emotion_model = load_model(emotion_model_path)

--------------------------------------

from keras.models import load_model

# Load the saved sentiment LSTM.
# Fix: the original two lines each carried a stray leading space, which is
# an IndentationError for module-level code in a plain .py file.
hupmodel = load_model("hup_sentimental_lstm.h5")
------------------------------
# NOTE(review): `load_model` is imported but unused here — this snippet
# saves a model rather than loading one.  `model` is assumed to exist
# from an earlier notebook cell.
from keras.models import load_model
model.save("hupmodel18.h5")
-------------------------------
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
import joblib

# Map each cyberbullying class label to an integer id, and persist the
# fitted encoder so inference code can invert predictions later.
# NOTE(review): `df` is assumed to exist from an earlier cell.
labelencoder = LabelEncoder()
y = labelencoder.fit_transform(df['cyberbullying_type'])
joblib.dump(labelencoder, 'huplabelencoder.pkl')

# One-hot encode the integer ids for training against a softmax output.
y1 = to_categorical(y)
--------------------------------------------
import re

import joblib
import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

# The stopword corpus is required by stemming() below.
nltk.download('stopwords')

port_stem = PorterStemmer()

# Build the stopword set once.  The original recomputed
# stopwords.words('english') — a list — for EVERY word, making the filter
# O(words x stopwords); a module-level set gives O(1) membership tests.
_STOPWORDS = set(stopwords.words('english'))


def stemming(content):
    """Normalize a tweet for the classifier.

    Replaces every non-letter with a space, lowercases, drops English
    stopwords, Porter-stems the remaining words, and returns them joined
    by single spaces.
    """
    cleaned = re.sub('[^a-zA-Z]', ' ', content)
    words = cleaned.lower().split()
    # `word not in` is the idiomatic form of the original `not word in`.
    stemmed = [port_stem.stem(word) for word in words if word not in _STOPWORDS]
    return ' '.join(stemmed)

# --- Inference: classify a single tweet typed by the user. ---
# Fix: `pad_sequences` and `np` were used below but never imported,
# so the original snippet raised NameError at runtime.
import numpy as np
import pandas as pd
import joblib
from keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences

hupstr = input('Enter the tweet')
df1 = pd.DataFrame({'text': [hupstr]})
# Apply the same text normalization used at training time.
df1['text'] = df1['text'].apply(stemming)

hupmodel = load_model("my_model.keras")

# Tokenize with the fitted tokenizer and pad to the training sequence length.
# NOTE(review): maxlen=337 must match the padding used at training time — confirm.
loaded_vectorizer = joblib.load('huptokenizer.joblib')
X2 = loaded_vectorizer.texts_to_sequences(df1['text'])
X2 = pad_sequences(X2, maxlen=337)

hupprediction1 = hupmodel.predict_on_batch(np.stack(X2))

# Map the argmax class index back to its original text label.
labelencoder = joblib.load('huplabelencoder.pkl')
huplabel1 = labelencoder.inverse_transform(np.argmax(hupprediction1, axis=1))
hupresult1 = ''.join(huplabel1)
print(hupresult1)
------------
hupprediction1 = model.predict([data3]).argmax(axis=1)
# or, alternatively (raw class probabilities, no argmax):
hupprediction1 = model.predict_on_batch(np.stack(data3))