<a href="http://www.reibang.com/p/54870e9541fc">總目錄</a>
課程頁面:https://www.udacity.com/course/intro-to-computer-science--cs101
授課教師:Dave Evans https://www.cs.virginia.edu/~evans/
如下內容包含課程筆記和自己的擴展折騰
Factorial (recursion)
# -*- coding: utf-8 -*-
# Define a procedure, factorial, that takes a natural number as its input, and
# returns the number of ways to arrange the input number of items.
def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
print factorial(0)
#>>> 1
print factorial(5)
#>>> 120
print factorial(10)
#>>> 3628800
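A quick cross-check of the comment above (my own addition, not part of the course code): factorial(n) is exactly the number of ways to arrange n distinct items, which can be counted by brute force with itertools.permutations for small n.
# My own sanity check: count every ordering of n distinct items directly.
from itertools import permutations

def count_arrangements(n):
    # Enumerate all orderings of n items and count them.
    return len(list(permutations(range(n))))

print(count_arrangements(4))
#>>> 24  (matches factorial(4))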
Palindromes
# -*- coding: utf-8 -*-
# Define a procedure is_palindrome, that takes as input a string, and returns a
# Boolean indicating if the input string is a palindrome.
# Base Case: '' => True
# Recursive Case: if first and last characters don't match => False
# if they do match, is the middle a palindrome?
# Below is my algorithm:
def is_palindrome(s):
if s == "":
return True
else:
return s[0] == s[-1] and is_palindrome(s[1:-1])
'''
Here is Udacity's algorithm; its approach differs slightly from mine:
def is_palindrome(s):
if s == "":
return True
else:
if s[0] == s[-1]:
return is_palindrome(s[1:-1])
else:
return False
'''
print is_palindrome('')
#>>> True
print is_palindrome('abab')
#>>> False
print is_palindrome('abba')
#>>> True
print is_palindrome('aba')
#>>> True
Evans mentioned that, compared with the iterative way, the recursive way is fairly expensive in Python... sometimes it is best to optimize with a bit of mathematics.
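For instance (a sketch of my own, not from the lecture), the recursive calls and the repeated s[1:-1] copies can be avoided entirely by comparing the string against its reverse:
def is_palindrome_iter(s):
    # One reversed copy instead of one slice per recursive call.
    return s == s[::-1]

print(is_palindrome_iter('abba'))
#>>> True
print(is_palindrome_iter('abab'))
#>>> False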
Fibonacci numbers (recursive)
# -*- coding: utf-8 -*-
# Define a procedure, fibonacci, that takes a natural number as its input, and
# returns the value of that fibonacci number.
# Two Base Cases:
# fibonacci(0) => 0
# fibonacci(1) => 1
# Recursive Case:
# n > 1 : fibonacci(n) => fibonacci(n-1) + fibonacci(n-2)
def fibonacci(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fibonacci(n-1) + fibonacci(n-2)
print fibonacci(0)
#>>> 0
print fibonacci(1)
#>>> 1
print fibonacci(15)
#>>> 610
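The naive recursion above recomputes the same subproblems again and again, so its running time grows exponentially with n. A small sketch of my own (not course code): cache each result in a dictionary, and the recursive definition becomes fast enough even for fibonacci(36).
# My own memoized variant: each fibonacci(k) is computed at most once.
memo = {0: 0, 1: 1}

def fibonacci_memo(n):
    if n not in memo:
        memo[n] = fibonacci_memo(n - 1) + fibonacci_memo(n - 2)
    return memo[n]

print(fibonacci_memo(36))
#>>> 14930352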
Fibonacci numbers (iterative)
# -*- coding: utf-8 -*-
#Define a faster fibonacci procedure that will enable us to compute
#fibonacci(36).
# Below is my algorithm; it is rather bloated.
def fibonacci(n):
fibo1 = 0
fibo2 = 1
count = 1
if n == 1:
return 1
if n == 0:
return 0
while count < n:
r = fibo1 + fibo2
fibo1 = fibo2
fibo2 = r
count +=1
return r
'''
# Udacity's algorithm is much cleaner by comparison:
def fibonacci(n):
current = 0
after = 1
for i in range(n):
current, after = after, current+after
return current
'''
print fibonacci(36)
#>>> 14930352
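The loop above is linear in n, which is already plenty for fibonacci(36). The mathematical optimization Evans hints at can go further; as a sketch of my own (not course material), the fast-doubling identities F(2k) = F(k)(2F(k+1) - F(k)) and F(2k+1) = F(k)^2 + F(k+1)^2 bring the cost down to O(log n) multiplications.
def fib_pair(n):
    # My own fast-doubling sketch: returns (F(n), F(n+1)).
    if n == 0:
        return (0, 1)
    a, b = fib_pair(n // 2)       # a = F(k), b = F(k+1), with k = n // 2
    c = a * (2 * b - a)           # F(2k)
    d = a * a + b * b             # F(2k+1)
    if n % 2 == 0:
        return (c, d)
    else:
        return (d, c + d)

print(fib_pair(36)[0])
#>>> 14930352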
Implementing Urank
# -*- coding: utf-8 -*-
# Modify the crawl_web procedure so that instead of just returning the
# index, it returns an index and a graph. The graph should be a
# Dictionary where the key:value entries are:
# url: [list of pages url links to]
def crawl_web(seed): # returns index, graph of outlinks
tocrawl = [seed]
crawled = []
graph = {} # <url>:[list of pages it links to]
index = {}
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
outlinks = get_all_links(content)
graph[page] = outlinks
#Insert Code Here
union(tocrawl, outlinks)
crawled.append(page)
return index, graph
cache = {
'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipes:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbanzo beans.
<li> Crush them in a blender.
<li> Add 3 tablespoons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def get_page(url):
if url in cache:
return cache[url]
else:
return None
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def add_to_index(index, keyword, url):
if keyword in index:
index[keyword].append(url)
else:
index[keyword] = [url]
def lookup(index, keyword):
if keyword in index:
return index[keyword]
else:
return None
index , graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
if 'http://udacity.com/cs101x/urank/index.html' in graph:
print graph['http://udacity.com/cs101x/urank/index.html']
#>>> ['http://udacity.com/cs101x/urank/hummus.html',
#'http://udacity.com/cs101x/urank/arsenic.html',
#'http://udacity.com/cs101x/urank/kathleen.html',
#'http://udacity.com/cs101x/urank/nickel.html',
#'http://udacity.com/cs101x/urank/zinc.html']
Compute Ranks
# -*- coding: utf-8 -*-
#Finishing the page ranking algorithm.
def compute_ranks(graph):
d = 0.8 # damping factor
numloops = 10
ranks = {}
npages = len(graph) # 6
for page in graph:
ranks[page] = 1.0 / npages # 0.16667
for i in range(0, numloops):
newranks = {}
for page in graph:
newrank = (1 - d) / npages
for item in graph:
if page in graph[item]:
newrank += d*(ranks[item]/len(graph[item]))
newranks[page] = newrank
ranks = newranks
return ranks
cache = {
'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipes:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbanzo beans.
<li> Crush them in a blender.
<li> Add 3 tablespoons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def crawl_web(seed): # returns index, graph of outlinks
tocrawl = [seed]
crawled = []
graph = {} # <url>, [list of pages it links to]
index = {}
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
outlinks = get_all_links(content)
graph[page] = outlinks
union(tocrawl, outlinks)
crawled.append(page)
return index, graph
def get_page(url):
if url in cache:
return cache[url]
else:
return None
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def add_to_index(index, keyword, url):
if keyword in index:
index[keyword].append(url)
else:
index[keyword] = [url]
def lookup(index, keyword):
if keyword in index:
return index[keyword]
else:
return None
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
print ranks
#>>> {'http://udacity.com/cs101x/urank/kathleen.html': 0.11661866666666663,
#'http://udacity.com/cs101x/urank/zinc.html': 0.038666666666666655,
#'http://udacity.com/cs101x/urank/hummus.html': 0.038666666666666655,
#'http://udacity.com/cs101x/urank/arsenic.html': 0.054133333333333325,
#'http://udacity.com/cs101x/urank/index.html': 0.033333333333333326,
#'http://udacity.com/cs101x/urank/nickel.html': 0.09743999999999997}
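To see what compute_ranks is doing on something hand-checkable, here is a toy example of my own (not from the course): two pages that link only to each other. Each update is newrank = (1 - d)/npages + d * sum(ranks[inlink] / number of its outlinks), which here is 0.2/2 + 0.8 * (rank of the other page), so by symmetry both ranks stay at 0.5.
# My own toy check, reusing the compute_ranks defined above.
toy_graph = {'a.html': ['b.html'],
             'b.html': ['a.html']}
print(compute_ranks(toy_graph))
#>>> both pages end up with rank 0.5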