# Simple demo of `turtle`, the GUI drawing module in the Python standard
# library: draws the letters "NEUSOFT".
# FIX: the import was commented out, so every `t.` call below raised
# NameError; also restored the commented-out pen-up/goto that moves to
# the starting position (the later goto(-120,...)/goto(-80,...) calls
# assume the "N" starts at x = -260).
import turtle as t

# Pen width: 10 px.
t.pensize(10)

# Move left to the start position without drawing.
t.penup()
t.goto(-260, 0)
t.pd()  # pen down

# Draw "N".
t.left(90)
t.forward(80)
t.right(145)
t.fd(100)  # fd/lt/rt are shorthand for forward/left/right
t.lt(145)
t.fd(80)

# Draw "E".
t.penup()
t.goto(-120, 0)
t.pd()
t.lt(90)
t.fd(60)
t.rt(90)
t.fd(40)
t.rt(90)
t.fd(60)
t.lt(180)
t.fd(60)
t.rt(90)
t.fd(40)
t.rt(90)
t.fd(60)

# Draw "U" (two verticals joined by a half circle).
t.penup()
t.goto(-80, 80)
t.pd()
t.rt(90)
t.fd(50)
t.circle(30, 180)
t.fd(50)
t.color('red')

# Draw "S" (two opposite 270-degree arcs).
t.penup()
t.goto(50, 60)
t.pd()
t.circle(22, 270)
t.circle(-22, 270)

# Draw "O" (full circle).
t.penup()
t.goto(140, 35)
t.pd()
t.circle(30)

# Keep the GUI window open; all drawing code must run before this call.
t.done()
# Common Python data types.
# list: similar to a C array, but elements may have mixed types.
# Pros: flexible; cons: slower than a raw array.
# Literal syntax: []
hero_name = ['魯班七號(hào)', '安琪拉', '李白', '劉備']

# Print the whole list / iterate it:
# print(hero_name)
# for hero in hero_name:
#     print(hero)

# Common operations.
# 1. Indexing: list_name[index]
print(hero_name[2])

# 2. Append an element to the end.
hero_name.append('后羿')
print('添加后的列表', hero_name)

# 3. Replace an element (mixed types are allowed, so an int is fine).
hero_name[1] = 1000
print('修改后的列表', hero_name)

# 4. Delete by index.
del hero_name[1]
print('刪除后的列表', hero_name)

# Build the list [1..10].
# FIX: the loop body had lost its indentation (SyntaxError); restored.
num = []
for i in range(1, 11):
    num.append(i)
print('數(shù)字', num)
# Common Python data types (duplicate lecture section, kept as notes).
# list: similar to a C array, but elements may have mixed types.
# Pros: flexible; cons: slower than a raw array.
# Literal syntax: []
# hero_name = ['魯班七號(hào)', '安琪拉', '李白', '劉備']
# # print(hero_name)
# # for hero in hero_name:
# #     print(hero)
#
# Common operations.
# 1. Indexing: list_name[index]
# FIX: this print was left active while `hero_name` above is commented
# out, so running the section alone raised NameError; commented it out
# to match the rest of the section.
# print(hero_name[2])
# 2. Append:
# hero_name.append('后羿')
# print('添加后的列表', hero_name)
#
# 3. Replace:
# hero_name[1] = 1000
# print('修改后的列表',hero_name)
#
# 4. Delete:
# del hero_name[1]
# print('刪除后的列表',hero_name)
#
# Exercise: build the numeric list [1, 2, 3, ..., 10].
# 1. Start from an empty list.
li = []
# 2. Append each value inside a for loop.
# FIX: the loop body had lost its indentation (SyntaxError); restored.
for i in range(1, 11):
    li.append(i)
print(li)
# Strings.
# Literal forms: '' and "".
# Slicing — take part of a sequence; also works on lists.
# name = 'abcdefg'
# # name[1]
# # [start:stop:step] — half-open interval (stop excluded)
# print(name[1:4])
# # a c e g
# print(name[0:7:2])
# # Start/stop may be omitted when slicing the whole sequence.
# print(name[::2])

# Common methods.
# Strip whitespace from both ends:
# name = ' abcdefg '
# # len() — number of elements in a sequence
# print(len(name))
# name = name.strip()
# print('去空格之后', len(name))

# Replace:
# price = '$999'
# price = price.replace('$','')
# print(price)
# # join — turn a list of strings into one string:
# li = ['a', 'b', 'c', 'd']
# a = '_'.join(li)
# print(a)
# print(type(a))
# Numbers.
# tuple — like a list, but immutable once created.
# Literal syntax: ()
# a = ('zhangsan', 'lisi', 'wangwu',1000)
# print(a)
# print(type(a))
#
# Indexing:
# print(a[1])
# # # Mutation raises TypeError:
# # a[3] = 'zhaoliu'
#
# # NOTE: a one-element tuple needs the trailing comma —
# # without it the parentheses are just grouping.
# b = ('lisi',) # tuple
# c = (1000,) # tuple
# print(type(b))
# print(type(c))
# dict — key-value mapping (comparable to Java's HashMap).
# Literal syntax: {}
info = {'name': '李四', 'age': 34, 'addr': '重慶市渝北區(qū)'}
print(len(info))
print(info)

# 1. Access a value by key:
# print(info['name'])
# 2. Update an existing key:
# info['addr'] = '北京市朝陽(yáng)區(qū)'
# print('修改后字典',info)
# 3. Insert a new key:
# info['sex'] = 'female'
# print('增加后字典',info)
# All keys:
# print(info.keys())
# All values:
# print(info.values())
# All key-value pairs:
# print(info.items())

# A list of 2-tuples converts directly into a dict.
d = [('name', '李四'), ('age', 34), ('addr', '北京市朝陽(yáng)區(qū)'), ('sex', 'female')]
d1 = dict(d)
print(d1)

# Iterate over the dict's key-value pairs.
# FIX: the loop body had lost its indentation (SyntaxError); restored.
for k, v in info.items():
    print(k, v)
# set
# Unordered; duplicate elements are removed.
# set1 = {'zhangsan', 'lisi', 222}
#
# print(type(set1))
# Iterate:
# for x in set1:
#     print(x)
# 1. Core Python data types and syntax.
# Sorting a plain list in place:
# li = []
# for i in range(10):
#     li.append(i)
# print(li)
# from random import shuffle
# shuffle(li)
# print('隨機(jī)打亂的列表', li)
# li.sort(reverse=True)
# print('排序后的列表', li)

# Sort a list of dicts by one field.
# FIX: `stu_info` and `sort_by_age` were commented out while the sort
# and prints below stayed active, raising NameError; restored them.
stu_info = [
    {"name": 'zhangsan', "age": 18},
    {"name": 'lisi', "age": 30},
    {"name": 'wangwu', "age": 99},
    {"name": 'tiaqi', "age": 3},
]
print('排序前', stu_info)


# def function_name(parameters):
#     body
def sort_by_age(x):
    """Sort key: return the student's age."""
    return x['age']


# key=<function> chooses what to sort by; reverse=True sorts descending.
stu_info.sort(key=sort_by_age, reverse=True)
print('排序后', stu_info)
# Exercise: sort (name, salary) tuples by the second element.
name_info_list = [
    ('張三', 4500),
    ('李四', 8900),
    ('王五', 2500),
    ('趙六', 6500),
]


def sort_by_grade(i):
    """Sort key: return the tuple's second element."""
    # FIX: this return had lost its indentation (SyntaxError); restored.
    return i[1]


# Ascending in-place sort by the tuple's second element.
name_info_list.sort(key=sort_by_grade)
print(name_info_list)
# 2. Reading local files.
# Use the built-in open() to read a file.
f = open(file='./novel/threekingdom.txt', mode='r', encoding='utf-8')
data = f.read()
f.close()
# One-liner variant (leaks the file handle; avoid):
# data = open(file='./novel/threekingdom.txt', mode='r', encoding='utf-8').read()
print(data)

# `with ... as` context manager — closes the stream automatically.
# FIX: the with-body had lost its indentation (SyntaxError); restored.
with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
    data = f.read()
print(data)

# Writing:
# txt = 'i like python'
# with open('python.txt','w', encoding='utf-8') as f:
#     f.write(txt)
text = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<h1>重慶師范歡迎你</h1>
</body>
</html>"""
print(text)
with open('chongqingshifan.html', 'w', encoding='utf-8') as f:
    f.write(text)
# 2. Reading local files (duplicate section; the reading part kept as notes).
# data = f.read()
# f.close()
# # data = open(file='./novel/threekingdom.txt', mode='r', encoding='utf-8').read()
# print(data)
# `with ... as` context manager — closes the stream automatically:
# with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
#     data = f.read()
# print(data)

# Writing.
# FIX: the with-body had lost its indentation (SyntaxError); restored.
txt = 'i like python'
with open('python.txt', 'w', encoding='utf-8') as f:
    f.write(txt)

text = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<h1>重慶師范歡迎你</h1>
</body>
</html>"""
print(text)
with open('chongqingshifan.html', 'w', encoding='utf-8') as f:
    f.write(text)
# 3. Chinese word segmentation with jieba.
# Install jieba via a domestic mirror:
# 1. create a `pip` folder under the user's home directory
# 2. create pip.ini inside it containing:
"""
[global]
index-url = http://mirrors.aliyun.com/pypi/simple/
[install]
trusted-host=mirrors.aliyun.com
"""
# pip install jieba

import jieba

# Three segmentation modes.
seg = "我來(lái)到北京清華大學(xué)"
# Accurate mode — the precise segmentation.
seg_list = jieba.lcut(seg)
print(seg_list)
# Full mode — every possible word; highly redundant.
seg_list1 = jieba.lcut(seg, cut_all=True)
print(seg_list1)
# Search-engine mode.
seg_list2 = jieba.lcut_for_search(seg)
print(seg_list2)

text = '小明碩士畢業(yè)于中國(guó)科學(xué)院計(jì)算所,后在日本京都大學(xué)深造'
seg_list4 = jieba.lcut(text, cut_all=True)
print(seg_list4)
# Search-engine mode runs accurate mode first, then re-splits long words.
seg_list5 = jieba.lcut_for_search(text)
print(seg_list5)
# NLP notes.
# FIX: dropped the duplicate second `import jieba` this section had.
import jieba

# Segmenting the "Three Kingdoms" novel:
# with open('./novel/threekingdom.txt','r', encoding='utf-8') as f:
#     words = f.read()
# print(len(words))  # character count, ~550k
# words_list = jieba.lcut(words)
# print(len(words_list))  # ~350k tokens after segmentation
# print(words_list)

# Segmentation — three modes (duplicate of the section above, kept as notes).
# seg = "我來(lái)到北京清華大學(xué)"
# Accurate mode:
# seg_list = jieba.lcut(seg)
# print(seg_list)
# Full mode — every possible word; highly redundant:
# seg_list1 = jieba.lcut(seg,cut_all=True)
# print(seg_list1)
# Search-engine mode:
# seg_list2 = jieba.lcut_for_search(seg)
# print(seg_list2)
#
# text = '小明碩士畢業(yè)于中國(guó)科學(xué)院計(jì)算所,后在日本京都大學(xué)深造'
# seg_list4 = jieba.lcut(text,cut_all=True)
# print(seg_list4)
# Search-engine mode runs accurate mode first, then re-splits long words:
# seg_list5 = jieba.lcut_for_search(text)
# print(seg_list5)
# 4. Word-cloud rendering.
from wordcloud import WordCloud
import jieba
import imageio

# Minimal example with plain English text:
# text = 'He was an old man who fished alone in a skiff in the Gulf Stream and he had gone eighty-four days now without taking a fish. In the first forty days a boy had been with him. But after forty days without a fish the boy’s parents had told him that the old man was now definitely and finally salao, which is the worst form of unlucky, and the boy had gone at their orders in another boat which caught three good fish the first week. It made the boy sad to see the old man come in each day with his skiff empty and he always went down to help him carry either the coiled lines or the gaff and harpoon and the sail that was furled around the mast. The sail was patched with flour sacks and, furled, it looked like the flag of permanent defeat.'
# wc = WordCloud().generate(text)
# wc.to_file('老人與海.png')

# Word cloud for the "Three Kingdoms" novel, shaped by a China-map mask.
mask = imageio.imread('./china.jpg')
# Read and segment the novel.
# FIX: the with-body had lost its indentation (SyntaxError); restored.
with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
    words = f.read()
# print(len(words))  # character count, ~550k
words_list = jieba.lcut(words)
# print(len(words_list))  # ~350k tokens after segmentation
print(words_list)

# WordCloud expects space-separated tokens, so join the token list.
novel_words = " ".join(words_list)
print(novel_words)

# Configure the cloud; a CJK-capable font is required, otherwise
# Chinese glyphs render as empty boxes.
wc = WordCloud(
    font_path='msyh.ttc',
    background_color='white',
    width=800,
    height=600,
    mask=mask
).generate(novel_words)
wc.to_file('三國(guó)詞云.png')