Hands-On word2vec with TensorFlow

Downloading the Corpus

# Imports required by the code below (the original post omitted them).
import collections
import os
import random
import urllib.request
import zipfile

import numpy as np
import tensorflow as tf

# Step 1: download the corpus from the URL below
def maybe_download(filename, expected_bytes):
	"""
	這個函數的功能是:
		如果filename不存在,就在上面的地址下載它。
		如果filename存在,就跳過下載。
		最終會檢查文字的字節數是否和expected_bytes相同。
	"""
	if not os.path.exists(filename):
		print('start downloading...')
		filename, _ = urllib.request.urlretrieve(url + filename, filename)
	statinfo = os.stat(filename)
	if statinfo.st_size == expected_bytes:
		print('Found and verified', filename)
	else:
		print(statinfo.st_size)
		raise Exception(
			'Failed to verify ' + filename + '. Can you get to it with a browser?')
	return filename

# Download the corpus text8.zip and verify the download
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', 31344016)
# Unzip the corpus and convert it into a list of words
def read_data(filename):
	"""
	這個函數的功能是:
		將下載好的zip文件解壓並讀取爲word的list
	"""
	with zipfile.ZipFile(filename) as f:
		data = tf.compat.as_str(f.read(f.namelist()[0])).split()
	return data

vocabulary = read_data(filename)
print('Data size', len(vocabulary))  # about 17 million words in total
print(vocabulary[0:100])  # print the first 100 words
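For reference (from the stock text8 corpus, not from output shown in the original post): the first print should report a data size of 17,005,207 words, and the list begins with words like 'anarchism originated as a term of abuse ...'.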

Building the Vocabulary

# Step 2: build a vocabulary, mapping uncommon words to the UNK token.
# The vocabulary size is 50,000 (i.e. we only keep the 50,000 most frequent words).
vocabulary_size = 50000
def build_dataset(words, n_words):
	"""
	函數功能:將原始的單詞表示變成index
	"""
	count = [['UNK', -1]]#篩選5W的單詞
	count.extend(collections.Counter(words).most_common(n_words - 1))
	#collections.Counter()計數器,每個單詞出現的次數。次數top截取n_words - 1個。
	dictionary = dict()#5W單詞,對每個單詞進行編碼
	for word, _ in count:
		dictionary[word] = len(dictionary)
	data = list()# words:將不常見的詞變成一個UNK標識符,常見的詞變成編碼。
	unk_count = 0# 記錄不長出現的詞的個數,也就是0的個數
	for word in words:
		if word in dictionary:
		index = dictionary[word]
		else:
			index = 0  # UNK的index爲0
			unk_count += 1
		data.append(index)
	count[0][1] = unk_count
	reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))#key values互換位置
	return data, count, dictionary, reversed_dictionary

data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,vocabulary_size)
del vocabulary  # free the memory
print('Most common words (+UNK)', count[:5])  # print the 5 most common words
# Print the first 10 ids in data alongside the original words
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
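To make the UNK mapping concrete, here is a small check that is not in the original post; it runs build_dataset on a toy corpus with n_words=3, so only the two most frequent words keep their own ids and everything else collapses to UNK (id 0):

toy_data, toy_count, toy_dict, toy_rev = build_dataset(
	['the', 'cat', 'the', 'dog', 'the', 'bird'], 3)
print(toy_count)  # [['UNK', 2], ('the', 3), ('cat', 1)]  (ties keep first-seen order)
print(toy_data)   # [1, 2, 1, 0, 1, 0] -- 'dog' and 'bird' become UNK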
# Below we use data to build the training set
data_index = 0

Generating Word Pairs

# Step 3: define a function that generates batches for the skip-gram model
def generate_batch(batch_size, num_skips, skip_window):
	# data_index acts as a pointer into data, starting at 0;
	# each time a batch is generated, data_index advances accordingly
	global data_index
	assert batch_size % num_skips == 0
	assert num_skips <= 2 * skip_window
	batch = np.ndarray(shape=(batch_size), dtype=np.int32)
	labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
	span = 2 * skip_window + 1  # [ skip_window target skip_window ]
	buffer = collections.deque(maxlen=span)  # sliding window of span words
	# data_index marks where the current window starts;
	# it advances one position each time the window slides forward
	for _ in range(span):
		buffer.append(data[data_index])
		data_index = (data_index + 1) % len(data)
	for i in range(batch_size // num_skips):
		# Use the buffer to generate samples.
		# buffer is a word window of length 2 * skip_window + 1;
		# each window yields num_skips samples.
		# print([reverse_dictionary[i] for i in buffer])
		target = skip_window  # target label at the center of the buffer
		# targets_to_avoid ensures the sampled context words are not repeated
		targets_to_avoid = [skip_window]
		for j in range(num_skips):
			while target in targets_to_avoid:
				target = random.randint(0, span - 1)
			targets_to_avoid.append(target)
			batch[i * num_skips + j] = buffer[skip_window]
			labels[i * num_skips + j, 0] = buffer[target]
		buffer.append(data[data_index])
		# After generating num_skips samples from this buffer, advance data_index by one.
		data_index = (data_index + 1) % len(data)
	data_index = (data_index + len(data) - span) % len(data)
	return batch, labels
# By default skip_window=1 and num_skips=2, so each run of 3
# (= skip_window*2 + 1) consecutive words yields 2 (num_skips) samples.
# For example, the three consecutive words ['used', 'against', 'early']
# yield the two samples: against -> used, against -> early
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)  # generate 8 pairs
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
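
As an extra check (not in the original post), widening the window pairs each center word with more of its neighbours; with skip_window=2 and num_skips=4, each span covers 5 consecutive words and all 4 context words are used:

batch, labels = generate_batch(batch_size=8, num_skips=4, skip_window=2)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])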

Building the Model
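
The original post breaks off here. As a minimal sketch of what this step typically looks like in the TensorFlow word2vec_basic.py tutorial that the code above follows: the model is an embedding matrix trained with NCE (noise-contrastive estimation) loss. The hyperparameters below (embedding_size, num_sampled, the learning rate of 1.0) are assumptions, not values taken from the original post.

import math

# Minimal TF1-style sketch (assumed hyperparameters, see note above).
batch_size = 128
embedding_size = 128  # dimensionality of the word vectors (assumed)
num_sampled = 64      # number of negative samples for NCE (assumed)

graph = tf.Graph()
with graph.as_default():
	train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
	train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])

	# One embedding_size-dimensional vector per vocabulary word.
	embeddings = tf.Variable(
		tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
	embed = tf.nn.embedding_lookup(embeddings, train_inputs)

	# Output-layer weights and biases for noise-contrastive estimation.
	nce_weights = tf.Variable(
		tf.truncated_normal([vocabulary_size, embedding_size],
							stddev=1.0 / math.sqrt(embedding_size)))
	nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

	# Average NCE loss over the batch; negative samples are drawn automatically.
	loss = tf.reduce_mean(
		tf.nn.nce_loss(weights=nce_weights,
					   biases=nce_biases,
					   labels=train_labels,
					   inputs=embed,
					   num_sampled=num_sampled,
					   num_classes=vocabulary_size))
	optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)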
