Wednesday, 28 October 2020

Sort in reverse order in Python

 

Hi,

Here a list is loaded with cubes, and one element is then changed intentionally so that the list is no longer in order.


a = []
for i in range(10):
    a.append(i ** 3)

a[4] = 10              # deliberately overwrite one element
print(a)

a.sort(reverse=True)   # sort the list in place, in descending order
print(a)
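The same result can be obtained without modifying the list, using the built-in sorted() (a small aside, not in the original post):

b = sorted(a, reverse=True)   # returns a new list in descending order
print(b)
print(a)                      # sorted() leaves the original list unchanged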

Array using Numpy and List in Python

 Hi all

An array can be implemented in Python in two ways: with NumPy, or with a plain list.

#### Using NumPy
import numpy as np

hup = np.arange(10)     # array of 0..9 (immediately overwritten below)
hup = np.zeros(10)      # array of 10 zeros
for i in range(10):
    hup[i] = i ** 2     # fill it with squares

print(hup)

############

# Using a list
a = []
for i in range(10):
    a.append(i ** 3)    # fill it with cubes

print(a)


a = np.array([[1, 0],
              [0, 1]])
b = np.array([[4, 1],
              [2, 2]])
c = np.matmul(a, b)     # matrix multiplication
print(c)
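For 2-D arrays like these, the @ operator gives the same result as np.matmul:

print(a @ b)   # same product as np.matmul(a, b)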

Monday, 7 September 2020

DB Connection from Python and Serial Port connection in Python

 Hi 

Use this code for a Python MySQL connection. You need the mysql.connector module, which comes from the mysql-connector-python package; you can install it with pip.

Also install pyserial with pip for the serial connection.
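Both can be installed with pip (the standard PyPI package names are mysql-connector-python and pyserial):

pip install mysql-connector-python pyserial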

---------------------------------------------------------------------------------------------------------------------


import time
import serial
import mysql.connector
from mysql.connector import Error

# Open the serial port (adjust 'COM3' and the settings for your device)
ser = serial.Serial('COM3', 9600, timeout=0, parity=serial.PARITY_EVEN, rtscts=1)

try:
    connection = mysql.connector.connect(host='localhost',
                                         database='iot',
                                         user='root',
                                         password='')
    cur = connection.cursor()
    while True:
        s = ser.read().decode(errors='ignore')   # read() returns bytes, so decode before comparing
        print(s)
        time.sleep(1)
        sql = "INSERT INTO timerecord (userid, recordtime, userrole) VALUES (%s, %s, %s)"
        val = ("1", s, "1")
        if s == "9":
            cur.execute(sql, val)
            connection.commit()                  # commit so the insert is actually saved
    connection.close()                           # note: not reached while the loop above runs forever
except mysql.connector.Error as error:
    connection.rollback()                        # roll back if any exception occurred
    print("Failed inserting record into timerecord table {}".format(error))


Thursday, 7 May 2020

Text Analysis

Hi All,

Text analysis in Python can be done with the help of many built-in functions and types. The types commonly used are list, set, etc. split() is a string method that returns a list of words, and len() gives the number of items.

Find some sample programs below.
1.
t = 'fine'
sentence = 'I am fine'
huparray = sentence.split()
flag = 0
for word in huparray:
    if word == t:
        flag = 1
if flag == 1:
    print('Positive')
2.
p = 'fine good great'
sentence = 'I am fine and great but'
shuparray = sentence.split()
phuparray = p.split()
flag = 0
pcount = 0

for word in shuparray:
    for word1 in phuparray:
        if word == word1:
            pcount = pcount + 1

if pcount > 0:
    print('Positive')

print(pcount)
3.
p = 'fine good great'
n = 'bad poor slow cry'
sentence = 'I am fine and great but slow cry'
shuparray = sentence.split()
nhuparray = n.split()
phuparray = p.split()
flag = 0
ncount = 0
pcount = 0

for word in shuparray:
    for word1 in phuparray:
        if word == word1:
            pcount = pcount + 1

for word in shuparray:
    for word1 in nhuparray:
        if word == word1:
            ncount = ncount + 1

if pcount > ncount:
    print('Positive')
elif ncount > pcount:
    print('Negative')
else:
    print('Neutral')

print(ncount)
print(pcount)
4.
p = 'fine good great'
sentence = 'I am fine and great but'
shuparray = sentence.split()
phuparray = p.split()
pcount = 0

plist = set(phuparray) & set(shuparray)   # words common to both

print(len(list(plist)))
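A compact variant of the same word-counting idea, using collections.Counter from the standard library (a sketch of my own, not one of the original programs):

from collections import Counter

p = 'fine good great'
sentence = 'I am fine and great but'
counts = Counter(sentence.split())            # word -> frequency in the sentence
pcount = sum(counts[w] for w in p.split())    # total occurrences of the positive words
print('Positive' if pcount > 0 else 'Neutral')
print(pcount)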

Sunday, 12 April 2020

Encoding

Hi all,

Please find the encoding program below (MATLAB). It converts text to binary, maps each pair of bits to a DNA base, and then looks up four-base blocks in the p53 nucleotide sequence to produce the ciphertext indices.

------------------------------------------------
% text = fileread(fullpathname);
text = 'HUP';
ASCII = double(text);            % character codes
A = dec2bin(ASCII, 8);           % 8-bit binary strings, one row per character
B = logical(A - 48);             % convert the '0'/'1' characters to logical bits

m = size(B, 1);
n = size(B, 2);

p = 'A';
q = 'C';
r = 'G';
S = 'T';

k = 1;
x = .4;
h = waitbar(x, 'Process start');
% Map every pair of bits to a DNA base: 01->A, 10->C, 00->G, 11->T
for i = 1:m
    for j = 1:2:n
        if (B(i,j) == 0 && B(i,j+1) == 1)
            sc(k) = p;
        elseif (B(i,j) == 1 && B(i,j+1) == 0)
            sc(k) = q;
        elseif (B(i,j) == 0 && B(i,j+1) == 0)
            sc(k) = r;
        else
            sc(k) = S;
        end
        k = k + 1;
    end
end

% Find each four-base block in the p53 nucleotide sequence and record
% the position of a randomly chosen occurrence.
p53nt = fastaread('p53nt.txt');
j = 1;
for i = 1:4:length(sc)
    k = strfind(p53nt.Sequence, sc(i:i+3));
    code(j) = k(randi([1 length(k)]));
    j = j + 1;
end

fileID = fopen('Ciphertext.txt', 'w');
for i = 1:length(code)
    fprintf(fileID, '%d ', code(i));
end
fclose(fileID);
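For readers following along in Python, here is a rough sketch of the same bit-pair to base mapping (my own illustration, using the A/C/G/T assignment above; the p53 lookup step is not reproduced):

text = 'HUP'
bits = ''.join(format(ord(ch), '08b') for ch in text)    # 8-bit binary per character
base_for = {'01': 'A', '10': 'C', '00': 'G', '11': 'T'}  # same mapping as the MATLAB code
sc = ''.join(base_for[bits[i:i+2]] for i in range(0, len(bits), 2))
print(sc)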

Create Environment and configure Tensorflow

Hi All,
We can set up a Python virtual environment and install TensorFlow using the steps below.

1. Install Python.
2. Install virtualenv:
pip install virtualenv
3. Create a folder c:\HUP.
4. Create the virtual environment:
python -m virtualenv c:\HUP
5. Activate the virtual environment:
c:\HUP\Scripts\activate
6. Install TensorFlow inside the environment:
pip install https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.8.0-py3-none-any.whl
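To confirm the install worked, a quick check inside the activated environment (a minimal sketch):

import tensorflow as tf
print(tf.__version__)   # should print the installed TensorFlow version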

Sunday, 9 February 2020

ANN in Python

Hi all,
See the code.

# courtesy: Internet

# Perceptron Algorithm on the Sonar Dataset
from random import seed
from random import randrange
from csv import reader

# Load a CSV file
def load_csv(filename):
  dataset = list()
  with open(filename, 'r') as file:
    csv_reader = reader(file)
    for row in csv_reader:
      if not row:
        continue
      dataset.append(row)
  return dataset

# Convert string column to float
def str_column_to_float(dataset, column):
  for row in dataset:
    row[column] = float(row[column].strip())

# Convert string column to integer
def str_column_to_int(dataset, column):
  class_values = [row[column] for row in dataset]
  unique = set(class_values)
  lookup = dict()
  for i, value in enumerate(unique):
    lookup[value] = i
  for row in dataset:
    row[column] = lookup[row[column]]
  return lookup

# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
  dataset_split = list()
  dataset_copy = list(dataset)
  fold_size = int(len(dataset) / n_folds)
  for i in range(n_folds):
    fold = list()
    while len(fold) < fold_size:
      index = randrange(len(dataset_copy))
      fold.append(dataset_copy.pop(index))
    dataset_split.append(fold)
  return dataset_split

# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
  correct = 0
  for i in range(len(actual)):
    if actual[i] == predicted[i]:
      correct += 1
  return correct / float(len(actual)) * 100.0

# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
  folds = cross_validation_split(dataset, n_folds)
  scores = list()
  for fold in folds:
    train_set = list(folds)
    train_set.remove(fold)
    train_set = sum(train_set, [])
    test_set = list()
    for row in fold:
      row_copy = list(row)
      test_set.append(row_copy)
      row_copy[-1] = None
    predicted = algorithm(train_set, test_set, *args)
    actual = [row[-1] for row in fold]
    accuracy = accuracy_metric(actual, predicted)
    scores.append(accuracy)
  return scores

# Make a prediction with weights
def predict(row, weights):
  activation = weights[0]
  for i in range(len(row)-1):
    activation += weights[i + 1] * row[i]
  return 1.0 if activation >= 0.0 else 0.0

# Estimate Perceptron weights using stochastic gradient descent
def train_weights(train, l_rate, n_epoch):
  weights = [0.0 for i in range(len(train[0]))]
  for epoch in range(n_epoch):
    for row in train:
      prediction = predict(row, weights)
      error = row[-1] - prediction
      weights[0] = weights[0] + l_rate * error
      for i in range(len(row)-1):
        weights[i + 1] = weights[i + 1] + l_rate * error * row[i]
  return weights

# Perceptron Algorithm With Stochastic Gradient Descent
def perceptron(train, test, l_rate, n_epoch):
  predictions = list()
  weights = train_weights(train, l_rate, n_epoch)
  for row in test:
    prediction = predict(row, weights)
    predictions.append(prediction)
  return(predictions)

# Test the Perceptron algorithm on the sonar dataset
seed(1)
# load and prepare data
filename = 'tcsr.csv'
dataset = load_csv(filename)
for i in range(len(dataset[0])-1):
  str_column_to_float(dataset, i)
# convert string class to integers
str_column_to_int(dataset, len(dataset[0])-1)
# evaluate algorithm
n_folds = 3
l_rate = 0.01
n_epoch = 500
scores = evaluate_algorithm(dataset, perceptron, n_folds, l_rate, n_epoch)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
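As a quick sanity check (my own addition, not from the original post), predict() can also be exercised directly with hand-set weights:

row = [1.0, 2.0, 0]            # two features and the known class label
weights = [-0.5, 0.1, 0.1]     # bias, w1, w2
# activation = -0.5 + 0.1*1.0 + 0.1*2.0 = -0.2, which is below 0.0
print(predict(row, weights))   # prints 0.0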

Friday, 3 January 2020

Replace the tweet emoticon with sentiment text

Hi Dears,
See the code below.
This code replaces one piece of text with another in Python.
It is applied in sentiment analysis, so that emoticons can be replaced with their sentiment labels.
This is used in the data-mining part of sentiment analysis with Python.


--------------------------
from nltk import NaiveBayesClassifier as nbc
from nltk.tokenize import word_tokenize
from itertools import chain
import csv
sentence="it is #abcd1212 and #12341212"
with open('emoticon.csv', 'r') as csvinput:
    reader=csv.reader(csvinput,delimiter=",")
    rownum = 0 
    training_data = []

    for code,tag in reader:
        sentence=sentence.replace(code,tag)
        
print(sentence)

------------------------------------
Output : it is positive and negative
------------------------------------

emoticon.csv

#abcd1212,positive
#12341212,negative
#121212aa,positive
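The NLTK imports above are not actually used in this snippet; as an aside, a small sketch of tokenizing the replaced sentence with word_tokenize (assuming the NLTK 'punkt' tokenizer data has been downloaded):

from nltk.tokenize import word_tokenize
tokens = word_tokenize(sentence)   # e.g. ['it', 'is', 'positive', 'and', 'negative']
print(tokens)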