Python 9: Condition, Timer, Queue (Day 40)

Review

Threads
A thread is the unit of execution inside a process.
A thread is the smallest unit that the CPU schedules and runs.
Threads in the same process share that process's resources.
Starting, stopping, and switching threads costs far less time than doing the same with processes.
In principle, the threads of one process can run on multiple CPUs at the same time.
Python and threads
Because the CPython interpreter can produce data-safety problems while interpreting code,
it uses the GIL (Global Interpreter Lock), which locks threads so that only one thread executes Python bytecode at a time.
Threads are started with the threading module (a minimal sketch follows below).
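A minimal sketch (the names and timings are my own, not from the notes) of starting a few threads with the threading module; all of them live in the same process, which is why they share its resources:

import os
import time
from threading import Thread, current_thread

def task(i):
    time.sleep(0.5)
    # every thread reports the same process id: one process, many threads
    print(i, current_thread().name, os.getpid())

t_lst = [Thread(target=task, args=(i,)) for i in range(5)]
for t in t_lst:
    t.start()
for t in t_lst:
    t.join()
print('main thread')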

Daemon threads
# A daemon process ends as soon as the main process finishes running its own code.
# A daemon thread ends only after the main thread ends, and the main thread first waits for all non-daemon child threads to finish.
# The main process does not exit right after its own code finishes; it waits for its child processes to end and then reclaims their resources.
from threading import Thread
import time

def func1():
    while True:
        print('*' * 10)
        time.sleep(1)

def func2():
    print('in func2')
    time.sleep(5)

t = Thread(target=func1)
t.daemon = True          # daemon thread: terminated once all non-daemon threads are done
t.start()
t2 = Thread(target=func2)
t2.start()
t2.join()                # the main thread waits for the non-daemon thread t2
print('main thread')     # after this line the daemon thread t is stopped as well

Locks

from threading import Lock, Thread
import time

def func(lock):
    global n
    lock.acquire()
    temp = n           # read, sleep, write back: unsafe unless protected by a lock
    time.sleep(0.2)
    n = temp - 1
    lock.release()

n = 10
t_lst = []
lock = Lock()
for i in range(10):
    t = Thread(target=func, args=(lock,))
    t.start()
    t_lst.append(t)
for t in t_lst:
    t.join()
print(n)   # always 0, because each read-modify-write runs under the lock
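For contrast, a minimal sketch (my own addition, with the same timing assumptions) of the same read-modify-write without the lock; every thread reads n before any of them has written back, so the final value is usually 9 rather than 0:

from threading import Thread
import time

n = 10

def func_unsafe():
    global n
    temp = n          # every thread reads 10 ...
    time.sleep(0.2)
    n = temp - 1      # ... and writes back 9, losing the other decrements

t_lst = [Thread(target=func_unsafe) for _ in range(10)]
for t in t_lst:
    t.start()
for t in t_lst:
    t.join()
print(n)   # typically prints 9, not 0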
from threading import RLock, Thread   # RLock is a recursive (reentrant) lock
import time

noodle_lock = fork_lock = RLock()     # two keys on the same key ring

def eat1(name):
    noodle_lock.acquire()             # one key
    print('%s got the noodles' % name)
    fork_lock.acquire()
    print('%s got the fork' % name)
    print('%s is eating noodles' % name)
    fork_lock.release()
    noodle_lock.release()

def eat2(name):
    fork_lock.acquire()
    print('%s got the fork' % name)
    time.sleep(1)
    noodle_lock.acquire()
    print('%s got the noodles' % name)
    print('%s is eating noodles' % name)
    noodle_lock.release()
    fork_lock.release()

Thread(target=eat1, args=('alex',)).start()
Thread(target=eat2, args=('tim',)).start()
Thread(target=eat1, args=('fox',)).start()
Thread(target=eat2, args=('gi',)).start()
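For comparison, a minimal sketch (not in the original notes) of the same scenario with two separate Lock objects instead of one shared RLock; one thread grabs the noodles while the other grabs the fork, and both then block forever. This is the deadlock that the single recursive lock above avoids, so this sketch intentionally hangs:

from threading import Lock, Thread
import time

noodle_lock = Lock()   # two independent locks this time
fork_lock = Lock()

def eat1(name):
    noodle_lock.acquire()
    print('%s got the noodles' % name)
    time.sleep(1)
    fork_lock.acquire()        # blocks forever: eat2 already holds the fork
    print('%s got the fork' % name)
    fork_lock.release()
    noodle_lock.release()

def eat2(name):
    fork_lock.acquire()
    print('%s got the fork' % name)
    time.sleep(1)
    noodle_lock.acquire()      # blocks forever: eat1 already holds the noodles
    print('%s got the noodles' % name)
    noodle_lock.release()
    fork_lock.release()

Thread(target=eat1, args=('alex',)).start()
Thread(target=eat2, args=('tim',)).start()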

Semaphore

from threading import Semaphore,Thread
import time

def func(sem,a,b):
    sem.acquire()
    time.sleep(1)
    print(a+b)
    sem.release()

sem = Semaphore(4)   # at most 4 threads can hold the semaphore at the same time
for i in range(10):
    t = Thread(target=func,args=(sem,i,i+5))
    t.start()
Event

# Connecting to a database and checking whether the database is reachable
# A database is like a folder;
# the folder holds many Excel-like tables.
# It makes it more convenient to create, read, update, and delete data,
# and it provides a safe access mechanism.
# Thread 1: connect to the database
#   wait for a signal telling us that the network to the database is up,
#   then connect to the database
# Thread 2: check whether the network to the database is reachable
#   time.sleep() to simulate the check,
#   then set the event's state to True
from threading import Thread, Event
import time, random

def connect_db(e):
    count = 0
    while count < 3:
        e.wait(0.2)   # while the event is not set, wait at most 0.2 s, then give up
        if e.is_set():
            print('connected to the database')
            break
        else:
            count += 1
            print('attempt %s failed' % count)
    else:
        raise TimeoutError('database connection timed out')

def check_web(e):
    time.sleep(random.randint(0, 3))
    e.set()           # signal that the network to the database is up

e = Event()
t1 = Thread(target=connect_db, args=(e,))
t2 = Thread(target=check_web, args=(e,))
t1.start()
t2.start()

Queue

import queue

# q = queue.LifoQueue()   # stack: last in, first out
# q.put(1)
# q.put(2)
# q.put(3)
# print(q.get())
# print(q.get())

q = queue.PriorityQueue()  # priority queue: the smallest priority value comes out first
q.put((20, 'a'))
q.put((10, 'b'))
q.put((30, 'c'))
print(q.get())             # (10, 'b')
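For completeness, a minimal sketch (my own addition) of the plain FIFO queue from the same module; queue.Queue is thread-safe and is the usual channel for passing data between threads:

import queue
from threading import Thread

q = queue.Queue()          # FIFO: first in, first out, thread-safe

def producer():
    for i in range(3):
        q.put(i)           # put items in

def consumer():
    for _ in range(3):
        print(q.get())     # items come out in the order they were put: 0, 1, 2

t1 = Thread(target=producer)
t2 = Thread(target=consumer)
t1.start()
t2.start()
t1.join()
t2.join()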

Thread pool


import time
from concurrent.futures import ThreadPoolExecutor

def func(n):
    time.sleep(2)
    print(n)
    return n * n

tpool = ThreadPoolExecutor(max_workers=5)   # rule of thumb: no more than CPU count * 5
t_lst = []
for i in range(20):
    t = tpool.submit(func, i)               # submit returns a Future
    t_lst.append(t)
tpool.shutdown()                            # like close + join: wait for all tasks to finish
print('main thread')
for t in t_lst:
    print('***', t.result())                # Future.result() gives the return value
import time
from concurrent.futures import ThreadPoolExecutor

def func(n):
    time.sleep(2)
    print(n)
    return n * n

tpool = ThreadPoolExecutor(max_workers=5)   # rule of thumb: no more than CPU count * 5
tpool.map(func, range(20))                  # submits in bulk; no Future objects come back, so the return values are not collected here
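Note that Executor.map does return an iterator that yields the results in submission order, so the return values can still be collected by looping over it; a minimal sketch:

import time
from concurrent.futures import ThreadPoolExecutor

def func(n):
    time.sleep(2)
    return n * n

tpool = ThreadPoolExecutor(max_workers=5)
for result in tpool.map(func, range(20)):   # results are yielded in submission order
    print('***', result)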
import time
from concurrent.futures import ThreadPoolExecutor

def func(n):
    time.sleep(2)
    print(n)
    return n * n

def call_back(m):
    print('the result is %s' % m.result())  # the callback receives the finished Future

tpool = ThreadPoolExecutor(max_workers=5)   # rule of thumb: no more than CPU count * 5
for i in range(20):
    tpool.submit(func, i).add_done_callback(call_back)
Original post: https://www.cnblogs.com/zhangtengccie/p/10421577.html