Let's jump straight to the source code.
The tokenizer class:
# -*- coding: utf-8 -*-
from ctypes import *

class tokenizer:
    def __init__(self):
        # Enumerate the punctuation marks to strip, including the space and the empty string
        self._stext = ['、','“','”',',','。','《','》',':',';','!','‘','’','?','?','!','·',' ','']
        # Load the stop-word list, one word per line, stripping trailing whitespace
        self._stopword_list = [line.strip() for line in open('stopword.txt')]

    def parse(self, text):
        # Load the ICTCLAS50 DLL and initialise it with its data directory
        participle = cdll.LoadLibrary('X:\\API\\ICTCLAS50.dll')
        participle.ICTCLAS_Init(c_char_p('X:\\API'))
        strlen = len(c_char_p(text).value)
        # Result buffer: 6x the input length leaves room for the separators
        t = c_buffer(strlen * 6)
        # Segment the paragraph; the trailing 0 turns POS tagging off
        participle.ICTCLAS_ParagraphProcess(c_char_p(text), c_int(strlen), t, c_int(3), 0)
        # ICTCLAS separates the tokens with spaces
        atext_list = t.value.split(' ')
        participle.ICTCLAS_Exit()
        # First drop punctuation, then drop stop words
        rtext = [item for item in atext_list if item not in self._stext]
        result_list = [iword for iword in rtext if iword not in self._stopword_list]
        return result_list
Calling the tokenizer class, the test_tokenizer script:
# -*- coding: utf-8 -*-
import tokenizer

# Sample Chinese text to segment (kept in Chinese, since it is the segmenter's input)
text = "文本的分類和聚類是一個比較有意思的話題,我以前也寫過一篇blog《基于K-Means的文本聚類算法》,加上最近讀了幾本數據挖掘和機器學習的書籍,因此很想寫點東西來記錄下學習的所得。"
# Renamed from "list" to avoid shadowing the built-in
word_list = tokenizer.tokenizer().parse(text)
for item in word_list:
    print item
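One design note: parse() loads the DLL and runs ICTCLAS_Init/ICTCLAS_Exit on every call, which gets expensive when segmenting many documents. A sketch of a variant with one-time initialisation (same hypothetical paths and API assumptions as above; persistent_tokenizer is my name, not from the original post, and close() must be called once segmentation is finished):

from ctypes import *

class persistent_tokenizer:
    def __init__(self):
        # Initialise the engine once and keep it loaded across parse() calls
        self._participle = cdll.LoadLibrary('X:\\API\\ICTCLAS50.dll')
        self._participle.ICTCLAS_Init(c_char_p('X:\\API'))

    def parse(self, text):
        strlen = len(c_char_p(text).value)
        t = c_buffer(strlen * 6)
        self._participle.ICTCLAS_ParagraphProcess(c_char_p(text), c_int(strlen), t, c_int(3), 0)
        return t.value.split(' ')

    def close(self):
        # Release the engine when all documents have been processed
        self._participle.ICTCLAS_Exit()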