# 进程池 和 管道 , 进程之间的 信息共享

# 管道实现生产者消费者模型
# from multiprocessing import Lock,Pipe,Process
# def producer(con,pro,name,food):
# con.close()
# for i in range(100):
# f = '%s生产%s%s'%(name,food,i)
# print(f)
# pro.send(f)
# pro.send(None)
# pro.send(None)
# pro.send(None)
# pro.close()
#
# def consumer(con,pro,name,lock):
# pro.close()
# while True:
# lock.acquire()
# food = con.recv()
# lock.release()
# if food is None:
# con.close()
# break
# print('%s吃了%s' % (name, food))
# if __name__ == '__main__':
# con,pro = Pipe()
# lock= Lock()
# p = Process(target=producer,args=(con,pro,'egon','泔水'))
# c1 = Process(target=consumer, args=(con, pro, 'alex',lock))
# c2 = Process(target=consumer, args=(con, pro, 'bossjin',lock))
# c3 = Process(target=consumer, args=(con, pro, 'wusir',lock))
# c1.start()
# c2.start()
# c3.start()
# p.start()
# con.close()
# pro.close()

# from multiprocessing import Process,Pipe,Lock
#
# def consumer(produce, consume,name,lock):
# produce.close()
# while True:
# lock.acquire()
# baozi=consume.recv()
# lock.release()
# if baozi:
# print('%s 收到包子:%s' %(name,baozi))
# else:
# consume.close()
# break
#
# def producer(produce, consume,n):
# consume.close()
# for i in range(n):
# produce.send(i)
# produce.send(None)
# produce.send(None)
# produce.close()
#
# if __name__ == '__main__':
# produce,consume=Pipe()
# lock = Lock()
# c1=Process(target=consumer,args=(produce,consume,'c1',lock))
# c2=Process(target=consumer,args=(produce,consume,'c2',lock))
# p1=Process(target=producer,args=(produce,consume,30))
# c1.start()
# c2.start()
# p1.start()
# produce.close()
# consume.close()

# pipe 数据不安全性
# IPC
# 加锁来控制操作管道的行为 来避免进程之间争抢数据造成的数据不安全现象

# 队列 进程之间数据安全的
# 管道 + 锁


# 进程之间的 数据共享
# 进程间数据是独立的,可以借助于队列或管道实现通信,二者都是基于消息传递的
# 虽然进程间数据独立,但可以通过Manager实现数据共享,事实上Manager的功能远不止于此
# from multiprocessing import Manager,Process

# def main(dic):
# dic['count'] -= 1
# print(dic)
#
# if __name__ == '__main__':
# m = Manager()
# dic=m.dict({'count':100})
# p_lst = []
# p = Process(target=main, args=(dic,))
# p.start()
# p.join()

# from multiprocessing import Manager,Process,Lock
# def main(dic,lock):
# dic['count'] -= 1
#
# if __name__ == '__main__':
# m = Manager()
# l = Lock()
# dic=m.dict({'count':100})
# p_lst = []
# for i in range(50):
# p = Process(target=main,args=(dic,l))
# p.start()
# p_lst.append(p)
# for i in p_lst: i.join()

# ======================= 重点 进程池
#进程池 相比 多进程
#1效率高
#2节省内存
#3减轻cpu负担
#进程池概念
# 定义一个池子,在里面放上固定数量的进程,有需求来了,就拿一个池中的进程来处理任务,等到处理完毕,进程并不关闭
# 而是将进程再放回进程池中继续等待任务。如果有很多任务需要执行,池中的进程数量不够,
# 任务就要等待之前的进程执行任务完毕归来,拿到空闲进程才能继续执行。也就是说,池中进程的数量是固定的,
# 那么同一时间最多有固定数量的进程在运行。这样不会增加操作系统的调度难度,还节省了开闭进程的时间,
# 也一定程度上能够实现并发效果。

#发现:并发开启多个客户端,服务端同一时间只有4个不同的pid,只能结束一个客户端,另外一个客户端才会进来.

# 进程池的第二种方法
# import os
# import time
# from multiprocessing import Pool
# def func(n):
# print('start func%s'%n,os.getpid())
# time.sleep(1)
# print('end func%s' % n,os.getpid())
# if __name__ == '__main__':
# p = Pool(5)
# for i in range(10):
# p.apply_async(func,args=(i,))
# p.close() # 结束进程池接收任务
# p.join() # 感知进程池中的任务执行结束

# 进程池的 返回值
# p = Pool()
# p.map(funcname,iterable) 默认异步的执行任务,且自带close和join
# p.apply 同步调用的
# p.apply_async 异步调用 和主进程完全异步 需要手动close 和 join
# from multiprocessing import Pool
# def func(i):
# return i*i
#
# if __name__ == '__main__':
# p = Pool(5)
# for i in range(10):
# res = p.apply(func,args=(i,)) # apply的结果就是func的返回值
# print(res)
# import time
# from multiprocessing import Pool
# def func(i):
# time.sleep(0.5)
# return i*i
#
# if __name__ == "__main__":
# p = Pool(5)
# res_1 = []
# for i in range(10):
# res = p.apply_async(func,args=(i,))
# res_1.append(res)
# for res in res_1:
# print(res.get())



# import time
# from multiprocessing import Pool
# def func(i):
# time.sleep(0.5)
# return i*i
#
# if __name__ == '__main__':
# p = Pool(5)
# res_l = []
# for i in range(10):
# res = p.apply_async(func,args=(i,)) # apply的结果就是func的返回值
# res_l.append(res)
# for res in res_l:print(res.get())# 等着 func的计算结果

# import time
# from multiprocessing import Pool
# def func(i):
# time.sleep(0.5)
# return i*i
#
# if __name__ == '__main__':
# p = Pool(5)
# ret = p.map(func,range(100))
# print(ret) #所有的 内容一块打印出来


# 回调函数
# import os
# from multiprocessing import Pool
#
#
# def func1(n):
# print('in func1', os.getpid())
# return n * n
#
#
# def func2(nn):
# print('in func2', os.getpid())
# print(nn)
#
# if __name__ == '__main__':
# print('主进程 :', os.getpid())
# p = Pool(5)
# for i in range(10):
# p.apply_async(func1, args=(i,), callback=func2)
# p.close()
# p.join()
# 回调函数是在主进程中执行的
# from multiprocessing import Pool
# def func1(n):
# return n+1
#
# def func2(m):
# print(m)
#
# if __name__ == '__main__':
# p = Pool(5)
# for i in range(10,20):
# p.apply_async(func1,args=(i,),callback=func2)
# p.close()
# p.join()
# 原文地址: https://www.cnblogs.com/xuerh/p/8671649.html