Category Archives: Python

Python: The RNN Principle Implemented with NumPy

Implementing the principle of an RNN in Python.
I've tweaked the code a little so that it can also do gradient descent.

import numpy as np
import torch
from torch import nn

class Rnn(nn.Module):

    def __init__(self, input_size, hidden_size, num_layers, bidirectional=False):
        super(Rnn, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional

    def forward(self, x):
        '''

        :param x: [seq, batch_size, embedding]
        :return: out, hidden
        '''

        # x.shape [seq, batch, feature]
        # hidden.shape [hidden_size, batch]
        # Whh0.shape [hidden_size, hidden_size]  Wih0.shape [hidden_size, feature]
        # Whh1.shape [hidden_size, hidden_size]  Wih1.size  [hidden_size, hidden_size]

        out = []
        x, hidden = np.array(x), [np.zeros((self.hidden_size, x.shape[1])) for i in range(self.num_layers)]
        Wih = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(1, self.num_layers)]
        Wih0 = np.random.random((self.hidden_size, x.shape[2]))
        Whh = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(self.num_layers)]
        # x, hidden, Wih, Whh = torch.from_numpy(x), torch.tensor(hidden), torch.tensor(Wih), torch.tensor(Whh)

        x = torch.from_numpy(x)
        # Stack the per-layer arrays first; building a tensor from a list of
        # numpy arrays directly is slow and raises a warning in recent PyTorch
        hidden = torch.tensor(np.array(hidden))
        Wih0 = torch.tensor(Wih0, requires_grad=True)
        Wih = torch.tensor(np.array(Wih), requires_grad=True)
        Whh = torch.tensor(np.array(Whh), requires_grad=True)

        time = x.shape[0]
        for t in range(time):
            # First layer: input-to-hidden plus hidden-to-hidden
            hidden[0] = torch.tanh(torch.matmul(Wih0, torch.transpose(x[t, ...], 1, 0)) +
                                   torch.matmul(Whh[0], hidden[0]))

            # Remaining layers take the previous layer's hidden state as input
            # (a separate loop variable avoids shadowing the time index)
            for layer in range(1, self.num_layers):
                hidden[layer] = torch.tanh(torch.matmul(Wih[layer-1], hidden[layer-1]) +
                                           torch.matmul(Whh[layer], hidden[layer]))

            out.append(hidden[self.num_layers-1])
        # A list of tensors cannot be converted with torch.tensor(); torch.stack() must be used instead
        return torch.stack(out), hidden


def sigmoid(x):
    return 1.0/(1.0 + 1.0/np.exp(x))


if __name__ == '__main__':
    a = torch.tensor([1, 2, 3])
    print(torch.cuda.is_available(), type(a))
    rnn = Rnn(1, 5, 4)
    input = np.random.random((6, 2, 1))
    out, h = rnn(input)
    print(f'seq is {input.shape[0]}, batch_size is {input.shape[1]} ', 'out.shape ', out.shape, ' h.shape ', h.shape)
    # print(sigmoid(np.random.random((2, 3))))
    #
    # element-wise multiplication
    # print(np.array([1, 2])*np.array([2, 1]))

----------
First of all, this code is only meant to aid understanding: the gradient-descent part is not written, and the parameters are fixed at random values, which does not affect the explanation. The code implements the principle of an RNN using only the NumPy library, so it cannot be used with GPU acceleration.

import numpy as np


class Rnn():

    def __init__(self, input_size, hidden_size, num_layers, bidirectional=False):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional

    def feed(self, x):
        '''

        :param x: [seq, batch_size, embedding]
        :return: out, hidden
        '''

        # x.shape [seq, batch, feature]
        # hidden.shape [hidden_size, batch]
        # Whh0.shape [hidden_size, hidden_size]  Wih0.shape [hidden_size, feature]
        # Whh1.shape [hidden_size, hidden_size]  Wih1.size  [hidden_size, hidden_size]

        out = []
        x, hidden = np.array(x), [np.zeros((self.hidden_size, x.shape[1])) for i in range(self.num_layers)]
        Wih = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(1, self.num_layers)]
        Wih.insert(0, np.random.random((self.hidden_size, x.shape[2])))
        Whh = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(self.num_layers)]

        time = x.shape[0]
        for t in range(time):
            # First layer: input-to-hidden plus hidden-to-hidden
            hidden[0] = np.tanh(np.dot(Wih[0], np.transpose(x[t, ...], (1, 0))) +
                                np.dot(Whh[0], hidden[0]))

            # Remaining layers take the previous layer's hidden state as input
            # (a separate loop variable avoids shadowing the time index)
            for layer in range(1, self.num_layers):
                hidden[layer] = np.tanh(np.dot(Wih[layer], hidden[layer-1]) +
                                        np.dot(Whh[layer], hidden[layer]))

            out.append(hidden[self.num_layers-1])

        return np.array(out), np.array(hidden)


def sigmoid(x):
    return 1.0/(1.0 + 1.0/np.exp(x))


if __name__ == '__main__':
    rnn = Rnn(1, 5, 4)
    input = np.random.random((6, 2, 1))
    out, h = rnn.feed(input)
    print(f'seq is {input.shape[0]}, batch_size is {input.shape[1]} ', 'out.shape ', out.shape, ' h.shape ', h.shape)
    # print(sigmoid(np.random.random((2, 3))))
    #
    # element-wise multiplication
    # print(np.array([1, 2])*np.array([2, 1]))
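
As a shape sanity check (not part of the original code), the built-in torch.nn.RNN with the same configuration produces the corresponding shapes. Note that this toy implementation keeps each hidden state as [hidden_size, batch], so its out/h arrays are the transpose, in the last two axes, of nn.RNN's [batch, hidden_size] layout:

import torch
from torch import nn

rnn = nn.RNN(input_size=1, hidden_size=5, num_layers=4)  # same config as Rnn(1, 5, 4)
x = torch.randn(6, 2, 1)                                  # [seq, batch, feature]
out, h = rnn(x)
print(out.shape, h.shape)  # torch.Size([6, 2, 5]) torch.Size([4, 2, 5])
# The NumPy version above prints out.shape (6, 5, 2) and h.shape (4, 5, 2),
# i.e. the same sizes with the last two axes swapped.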

Python: pandas.errors.ParserError when opening a table: Error tokenizing data. C error

CSV files are comma-delimited by default, but commas appear frequently in Chinese text, so when writing to CSV, pandas can use a different separator such as sep='\t' (tab-delimited).
When you later read such a CSV back for processing, remember to pass the matching delimiter:

path = r"Your input path and file"
data = pd.read_csv(path,delimiter="\t")
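For completeness, a minimal round-trip sketch (the DataFrame contents and the file name are made up for illustration):

import pandas as pd

# Free text containing commas would otherwise collide with the default separator
df = pd.DataFrame({"text": ["hello, world", "second line, with a comma"], "label": [0, 1]})

# Write tab-delimited so the commas inside the text don't split the columns
df.to_csv("data.tsv", sep="\t", index=False)

# Read it back with the matching delimiter
data = pd.read_csv("data.tsv", delimiter="\t")
print(data.head())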

Python: How to Obtain Your Public IP Quickly

from urllib.request import urlopen

def get_ip():
    try:
        # decode the response bytes into a readable string
        return urlopen('http://159.75.41.235/').read().decode('utf-8')
    except Exception:
        return False

print(get_ip())

How quickly you can fetch your external IP mainly depends on the server; this domestic service is much faster than the foreign ones.
Sample response:

{"city_id":2140,"region":"China|0|Guangdong|Guangzhou|Guangdong Radio and Television","ip":"124.240.53.227"}

ValueError: need more than 1 value to unpack

class Person():

    def __init__(self, Newname, Newage, Newtype):
        self.type = Newtype,self.name = Newname,self.age = Newage,

This is the wrong way to write it: the three assignments must not share one line. Python parses the line as a chained assignment whose right-most value is the 1-tuple (Newage,), and unpacking a single value into the two-element targets raises the ValueError above. Each assignment belongs on its own line:

    def __init__(self, Newname, Newage, Newtype):
        self.name = Newname
        self.age = Newage
        self.type = Newtype
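
A minimal stand-alone repro of the same trap outside Django (illustrative only, not from the original project):

class Demo:
    def __init__(self, new_name, new_age, new_type):
        # Python parses this single line as one chained assignment:
        #   self.type = (new_type, self.name) = (new_name, self.age) = (new_age,)
        # The right-most value (new_age,) is a 1-tuple, but the two middle targets
        # each expect two values, so unpacking fails with
        #   ValueError: need more than 1 value to unpack                 (Python 2)
        #   ValueError: not enough values to unpack (expected 2, got 1)  (Python 3)
        self.type = new_type, self.name = new_name, self.age = new_age,

Demo("hjc", "11", "man")

Below is the Django view that renders the Person object: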

from django.http import HttpResponse
from django.template import Template, Context
import datetime
from HelloDjango.Model.Person import *


def Hello(request, offset):
    tempStr = "{{ person.name }} {{ person.age }} {{ person.type }}"
    tempAnswer = {"person": Person("hjc", "11", "man")}

    t = Template(tempStr)
    c = Context(tempAnswer)
    return HttpResponse(t.render(c))

TypeError in Python regular expressions: expected string or bytes-like object

The following error occurs when parsing web page data with BeautifulSoup and then processing it with regular expressions:
TypeError: expected string or bytes-like object
It is generally caused by a data type mismatch.
There are six standard data types in Python 3: Number, String, List, Tuple, Set, and Dictionary.

Use print(type(object)) to check the current data type, where object is the object to query.

First, here is the code in question:

import re
import requests
from bs4 import BeautifulSoup
import lxml

#get the html data
urlSave = "https://www.douban.com/people/yekingyan/statuses"
req = requests.get(urlSave)
soup = BeautifulSoup(req.text,'lxml')

# After parsing beautifulsoup, get the required data
times = soup.select('div.actions > span')
says = soup.select('div.status-saying > blockquote')

Next, let's check what type the selected data actually is:

print('says:',type(says))

The result: says: <class 'list'>
This tells us that the data returned by soup.select() is of the list type.
Next, look at the elements of that list individually:

#Traversing the output
for say in says:
    print(type(say))

Let's see what type each element is.
The result: <class 'bs4.element.Tag'>, which is not one of the six standard types above.
Beautiful Soup converts a complex HTML document into a tree structure in which every node is a Python object. All objects fall into four types:
Tag, NavigableString, BeautifulSoup, and Comment.
If we apply a regular expression directly to this data:

for say in says:
    # Regular expressions to get the necessary data
    say = re.search('<p>(.*?)</p>',say)

we get the error:
TypeError: expected string or bytes-like object
Therefore, converting the data to a string before applying the regular expression solves the problem, as follows:

for say in says:
    # Convert the data type, otherwise an error will be reported
    say = str(say)
    # Regular expressions to get the necessary data
    say = re.search('<p>(.*?)</p>',say)
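
To actually pull the text out of the match object (a small addition; it assumes each selected tag contains a <p> element):

for say in says:
    say = str(say)                        # bs4.element.Tag -> str
    m = re.search('<p>(.*?)</p>', say)
    if m:                                 # skip tags without a <p>
        print(m.group(1))                 # the captured text between <p> and </p>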

 

PyCharm can't connect to the Python console, but it can run .py files, and Anaconda's command line can run Python commands

Error:
Traceback (most recent call last):
  File "D:\PyCharm Edu 2020.3.3\plugins\python-ce\helpers\pydev\pydevconsole.py", line 5, in <module>
    from _pydev_comm.pydev_rpc import make_rpc_client, start_rpc_server, start_rpc_server_and_make_client
  File "D:\PyCharm Edu 2020.3.3\plugins\python-ce\helpers\pydev\_pydev_comm\pydev_rpc.py", line 4, in <module>
    from _pydev_comm.pydev_server import TSingleThreadedServer
  File "D:\PyCharm Edu 2020.3.3\plugins\python-ce\helpers\pydev\_pydev_comm\pydev_server.py", line 4, in <module>
    from _shaded_thriftpy.server import TServer
  File "D:\PyCharm Edu 2020.3.3\plugins\python-ce\helpers\third_party\thriftpy\_shaded_thriftpy\server.py", line 9, in <module>
    from _shaded_thriftpy.transport import (
  File "D:\PyCharm Edu 2020.3.3\plugins\python-ce\helpers\third_party\thriftpy\_shaded_thriftpy\transport\__init__.py", line 57, in <module>
    from .sslsocket import TSSLSocket, TSSLServerSocket  # noqa
  File "D:\PyCharm Edu 2020.3.3\plugins\python-ce\helpers\third_party\thriftpy\_shaded_thriftpy\transport\sslsocket.py", line 7, in <module>
    import ssl
  File "D:\Anaconda\lib\ssl.py", line 98, in <module>
    import _ssl  # if we can't import it, let the error propagate
ImportError: DLL load failed while importing _ssl: The specified program could not be found.

How to fix this error
Link: https://stackoverflow.com/questions/54175042/python-3-7-anaconda-environment-import-ssl-dll-load-fail-error
Copy the following files from anaconda\Library\bin to anaconda\DLLs:
libcrypto-1_1-x64.dll and libssl-1_1-x64.dll
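
If you prefer to script the copy, a small sketch using shutil (the Anaconda path is an example; adjust it to your installation):

import shutil
from pathlib import Path

anaconda = Path(r"D:\Anaconda")  # adjust to your Anaconda install location
for name in ("libcrypto-1_1-x64.dll", "libssl-1_1-x64.dll"):
    shutil.copy(anaconda / "Library" / "bin" / name, anaconda / "DLLs" / name)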

Python: How to Create an Automatic Recording Program

This automatic recording program connects to a radio channel and records automatically whenever someone is talking.
All of a day's recordings are stored in a single audio file, and the program logs the start time, stop time, duration, and starting position within the recording, so a segment can be located quickly. When nobody is speaking, recording stops automatically to save disk space.
The recording runs in its own thread, so even if the program crashes, the recording file is not damaged.
==========
Warning:
Please abide by the Radio Regulations of the People's Republic of China when using this program.
This program is for learning and exchange only and must not be used for illegal purposes.

import threading
import pyaudio
import copy
import math
import time
import numpy
import wave

localtime = time.localtime()
localtimestr = time.strftime("%Y-%m-%d-%H-%M-%S",localtime)
#ltime = time.time()
line = 0
class RecordThread(threading.Thread):
    def __init__(self, audiofile="C:/Users/Public/RE/"+localtimestr+".wav"):
        threading.Thread.__init__(self)
        self.bRecord = True
        self.rr = True
        self.audiofile = audiofile
        self.chunk = 1024
        self.format = pyaudio.paInt16
        self.channels = 1
        self.rate = 16000

    def run(self):
        #print("RUN....")
        audio = pyaudio.PyAudio()
        wavfile = wave.open(self.audiofile, 'wb')
        wavfile.setnchannels(self.channels)
        wavfile.setsampwidth(audio.get_sample_size(self.format))
        wavfile.setframerate(self.rate)
        wavstream = audio.open(format=self.format,
                               channels=self.channels,
                               rate=self.rate,
                               input=True,
                               frames_per_buffer=self.chunk)
        global xx
        global yy
        xx = 0
        yy = 0
        global line
        alltime = 0
        ntime1 = 0
        ntime2 = 0
        starttime = 0
        stoptime = 0
        timediff = 0
        while self.bRecord:
            data = wavstream.read(self.chunk)
            # numpy.fromstring is deprecated for binary data; frombuffer does the same job
            wavdata = numpy.frombuffer(data, dtype=numpy.short)
            # Split the chunk into blocks of up to 16000 samples (one second at 16 kHz),
            # scale them down, and compute the average absolute amplitude of each block
            # as a simple voice-activity level
            M = []
            for i in range(0, len(wavdata), 16000):
                M.append(wavdata[i:i+16000] / 10)
            M = map(abslist, M)
            sound = list(map(mean, M))
            # Above this threshold we consider someone to be talking
            if sound[0] > 50:
                #print("over")
                #Write
                xx = 1
                wavfile.writeframes(data)
            else:
                xx = 0
            if xx > yy:
                yy = 1
                #START
                starttime = time.time()
                alltime = round(alltime + timediff,3)
                
                log("StartTime: "+time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(starttime))+"    开始时间: "+timestr(alltime))
                print("StartTime: "+time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(starttime))+"    开始时间: "+timestr(alltime))
            if xx < yy:
                yy = 0
                #STOP
                stoptime = time.time()
                timediff = round(stoptime - starttime,3)
                
                srt(str(line)+"\n"+timestr(alltime)+" --> "+timestr(alltime+timediff)+"\n"+str(line)+"\n"+"<font color=#5F9F9F>"+time.strftime("%H:%M:%S",time.localtime(starttime))+"  ->  "+time.strftime("%H:%M:%S",time.localtime(stoptime))+"</font> "+"<font color=#4D4DFF>"+timestr(timediff)+"</font>"+"\n")
                line = line + 1
                log("StopTime:  "+time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(stoptime))+"    结束时间: "+timestr(alltime+timediff)+"\n")
                print("StopTime:  "+time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(stoptime))+"    结束时间: "+timestr(alltime+timediff))
                print("Time: "+timestr(timediff)+"\n")
                 
        wavstream.stop_stream()
        wavstream.close()
        audio.terminate()
        
    def stoprecord(self):
        print("stop")
        self.bRecord = False
        
    def pause(self):
        print("pause")
        self.rr = False
       
    def next(self):
        print("next")
        self.rr = True
        
def abslist(a):
    return list(map(abs,a))
def mean(a):
    return numpy.longlong(sum(a))/len(a)

def log(msg):
    with open('C:/Users/Public/RE/'+localtimestr+ '.txt','a+') as file:
        file.write(msg+"\n")
        file.close()

def srt(msg):
    with open('C:/Users/Public/RE/'+localtimestr+ '.srt','a+') as file:
        file.write(msg+"\n")
        file.close()

        
def timestr(sec):
    m,s = divmod(sec,60)
    h,m = divmod(m,60)
    return str("%d:%02d:%.2f"%(h,m,s))



    
rt = RecordThread()
line = line + 1
#print(timestr(2.65))

log("RUN ...... Start At "+localtimestr+"    SYS OK!"+"  Frequency:91.1Mhz")
srt(str(line)+"\n"+"0:00:00.0 --> 0:00:30.0\n"+"{\\an8}"+"<font color=#FFFF00>"+ str(time.strftime("%Y/%m/%d %H:%M:%S",localtime))+"</font>"+" <font color=#00FFFF>(20:00-21:00)</font>"+"\n"+"<font color=#00FF00>438.025 -5 88.5 <i>QTH Suzhou Jiangsu China</i></font>\n<font color=#3299CC>Suzhou Amateur Radio 472752158</font>\n\n1\n0:00:00.0 --> 0:00:30.0\n{\\an5}Please comply with <font color=#FF0000><u><b>the Radio Regulations of the People's Republic of China</b></u></font>\n")
print("RUN ...... Start At "+localtimestr+"    SYS OK!"+"  Frequency:91.1Mhz")
rt.start()

Docker run xxx,E Time Elapsed: 0:00:00.000180

Problem description:

Environment: Windows 10. The project was debugged locally, then uploaded to Docker for execution, and it kept reporting errors (error message below). The Docker build also produced a warning (below). At first I thought it was a permission problem, but after searching I found that this was not the issue.

SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to
build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.


How to address it:

The real cause was that pandas, which the project requires, had not been written into the requirements.txt file.

1. Check that the project's dependencies are all written to the requirements.txt file and installed successfully (a quick local check is sketched below)!
2. Check your Python virtual environment to make sure it works!
3. Double check the project runtime environment!
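
As a quick (and imperfect) local check before building the image, you can try importing everything listed in requirements.txt. Note that PyPI package names do not always match import names (for example beautifulsoup4 installs as bs4), so treat failures as hints rather than proof:

import importlib

with open("requirements.txt") as f:
    for line in f:
        pkg = line.split("==")[0].strip()
        if pkg and not pkg.startswith("#"):
            try:
                importlib.import_module(pkg)
            except ImportError:
                print("possibly missing:", pkg)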

pytorch RuntimeError: Error(s) in loading state_dict for DataParallel… model import error and solution

When importing model files in pytorch, the following error is reported.

RuntimeError: Error(s) in loading state_dict for DataParallel:
Unexpected running stats buffer(s) “module.norm1.norm_func.running_mean” and “module.norm1.norm_func.running_var” for InstanceNorm2d with track_running_stats=False. If state_dict is a checkpoint saved before 0.4.0, this may be expected because InstanceNorm2d does not track running stats by default since 0.4.0. Please remove these keys from state_dict. If the running stats are actually needed, instead set track_running_stats=True in InstanceNorm2d to enable them. See the documentation of InstanceNorm2d for details.

Unexpected running stats buffer(s) “module.res5.norm1.norm_func.running_mean” and “module.res5.norm1.norm_func.running_var” for InstanceNorm2d with track_running_stats=False. If state_dict is a checkpoint saved before 0.4.0, this may be expected because InstanceNorm2d does not track running stats by default since 0.4.0. Please remove these keys from state_dict. If the running stats are actually needed, instead set track_running_stats=True in InstanceNorm2d to enable them. See the documentation of InstanceNorm2d for details.

Process finished with exit code 0
According to the hint, the imported checkpoint was saved with a PyTorch version earlier than 0.4.0, but we are now using PyTorch 1.0, so we looked at the load_state_dict call in the module.

The state_dict keys have evidently changed between versions: InstanceNorm2d no longer expects the running-stats buffers.
The solution is to strip those keys before loading. Change the loading code to the following:

    # Load the checkpoint and drop the running-stats buffers that the current
    # InstanceNorm2d layers (track_running_stats=False) no longer expect
    model_dict = torch.load(args.test_weight_path)
    model_dict_clone = model_dict.copy()   # copy so keys can be deleted while iterating
    for key, value in model_dict_clone.items():
        if key.endswith(('running_mean', 'running_var')):
            del model_dict[key]

    # strict=False ignores any remaining key mismatches
    Gnet.load_state_dict(model_dict, strict=False)
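
Alternatively, as the error message itself suggests, if the running statistics are actually needed you can keep those keys and instead define the normalization layers with track_running_stats=True so that the state_dict matches (an illustrative layer, not the original model definition):

from torch import nn

norm = nn.InstanceNorm2d(64, track_running_stats=True)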