Hi
import re
import joblib
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences

# Download the NLTK stopword list (needed on the first run).
nltk.download('stopwords')
port_stem = PorterStemmer()
def stemming(content):
    # Keep letters only, lowercase, tokenize, drop English stopwords,
    # and Porter-stem each remaining word.
    stemmed_content = re.sub('[^a-zA-Z]', ' ', content)
    stemmed_content = stemmed_content.lower()
    stemmed_content = stemmed_content.split()
    stemmed_content = [port_stem.stem(word) for word in stemmed_content
                       if word not in stopwords.words('english')]
    return ' '.join(stemmed_content)
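# Illustrative check of the preprocessing (the phrase is a made-up example,
# not from any dataset); the stemmed output should be roughly "love new updat":
# print(stemming("Loving the new update!!!"))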
# Read one tweet from the user and apply the same preprocessing used at training time.
hupstr = input('Enter the tweet: ')
df1 = pd.DataFrame({'text': [hupstr]})
df1['text'] = df1['text'].apply(stemming)
# Load the trained model and the fitted Keras tokenizer, then turn the tweet
# into a padded integer sequence of the length the model expects.
hupmodel = load_model("hupmodel17.h5")
loaded_vectorizer = joblib.load('huptokenizer.joblib')
X2 = loaded_vectorizer.texts_to_sequences(df1['text'])
X2 = pad_sequences(X2, maxlen=337)  # maxlen must match the sequence length used in training
# Predict, then map the predicted class index back to its original label.
hupprediction1 = hupmodel.predict_on_batch(X2)  # pad_sequences already returns a NumPy array
labelencoder = joblib.load('huplabelencoder.pkl')
huplabel1 = labelencoder.inverse_transform(np.argmax(hupprediction1, axis=1))
hupresult1 = ''.join(huplabel1)
print(hupresult1)
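For reference, here is a minimal sketch of how the three artifacts loaded above (hupmodel17.h5, huptokenizer.joblib, huplabelencoder.pkl) could have been produced at training time. The data, labels, and variable names below are placeholders and assumptions, not the actual training script.

import joblib
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder

# Placeholder data: in practice these would be the preprocessed tweets and
# their class labels from the training set.
train_texts = ["first preprocess tweet", "second preprocess tweet"]
train_labels = ["label_a", "label_b"]

tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_texts)
X = pad_sequences(tokenizer.texts_to_sequences(train_texts), maxlen=337)

labelencoder = LabelEncoder()
y = labelencoder.fit_transform(train_labels)

# A Keras model trained on (X, y) would then be saved together with the
# tokenizer and label encoder so the inference script above can reload them:
# model.save("hupmodel17.h5")
joblib.dump(tokenizer, 'huptokenizer.joblib')
joblib.dump(labelencoder, 'huplabelencoder.pkl')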