from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
import numpy as np
import yaml

from storage import MailThread, db_session

with open("data.yml", 'r') as stream:
|
|
try:
|
|
train=yaml.load(stream)
|
|
except yaml.YAMLError as exc:
|
|
print(exc)
|
|
|
|
data_types= { "answered": bool, "maintopic": str}
|
|
|
|

def store_training_data(i, d, key="answered"):
    """Store label value d for thread id i under the given key and persist the result."""
    global train
    if key not in data_types:
        raise ValueError("Key " + str(key) + " unknown")
    if i not in train:
        train[i] = {}
    if key is not None and isinstance(train[i], dict):
        if not isinstance(d, data_types[key]):
            raise TypeError("Data - %s - for key %s must be %s but it is %s"
                            % (d, key, data_types[key], type(d)))
        train[i][key] = d

    # Persist the updated training data back to data.yml.
    with open("data.yml", "w") as f:
        f.write(yaml.dump(train, default_flow_style=True))
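
# Illustrative usage only (the message id below is made up; real ids come from
# MailThread.firstmail):
# store_training_data("<20180101120000.GA1234@example.org>", True, key="answered")
# store_training_data("<20180101120000.GA1234@example.org>", "billing", key="maintopic")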


# Load training threads and encoded labels for a given key (label/property).
def get_training_threads(key="answered"):
    t_a = []
    d_a = []
    for i in train:
        t = db_session.query(MailThread).filter(MailThread.firstmail == i).first()
        if t is not None:  # the thread must exist in the database
            if key in train[i]:  # the training data must contain the relevant key
                t_a.append(t)
                d_a.append(train[i][key])
    le = LabelEncoder()
    d_a2 = le.fit_transform(d_a)
    # Return the threads, the integer-encoded labels, and the fitted LabelEncoder.
    return (t_a, d_a2, le)


def in_training(i, key="answered"):
    return i in train and key in train[i]


def print_answers(l):
    # Print the index -> class label mapping of a fitted LabelEncoder.
    cc = l.classes_
    for i, c in enumerate(cc):
        print(str(i) + ": " + str(c))
    return None


class ThreadDictExtractor(BaseEstimator, TransformerMixin):
    """Extract the flat dict representation of each MailThread (the 'envelope' features)."""

    def fit(self, x, y=None):
        return self

    def transform(self, X, y=None):
        return [t.mail_flat_dict() for t in X]


class ThreadSubjectExtractor(BaseEstimator, TransformerMixin):
    """Extract the subject of each MailThread for CountVectorizer."""

    def fit(self, x, y=None):
        return self

    def transform(self, X, y=None):
        return [t.subject() for t in X]


class ThreadTextExtractor(BaseEstimator, TransformerMixin):
    """Extract the message text of each MailThread for CountVectorizer."""

    def fit(self, x, y=None):
        return self

    def transform(self, X, y=None):
        return [t.text() for t in X]
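
# The extractors above rely on MailThread exposing mail_flat_dict(), subject() and
# text(). As a rough sketch (the field names here are made up, not taken from
# storage.py), the 'envelope' branch turns one dict per thread into a sparse
# feature matrix via DictVectorizer:
# DictVectorizer().fit_transform([
#     {"from": "alice@example.org", "n_mails": 3},
#     {"from": "bob@example.org", "n_mails": 1},
# ])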


def build_pipe(p="pipe1"):
    # Build one of the predefined classification pipelines by name.
    if p == "pipe1":
        # Envelope features only, Naive Bayes classifier.
        p = Pipeline([
            ('tde', ThreadDictExtractor()),
            ('dv', DictVectorizer()),
            ('clf', MultinomialNB()),
        ])
    elif p == "pipe2":
        # Subject + text + envelope features, Naive Bayes classifier.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([
                    ('tse', ThreadSubjectExtractor()),
                    ('cv', CountVectorizer()),
                    ('tfidf', TfidfTransformer()),
                ])),
                ('text', Pipeline([
                    ('tte', ThreadTextExtractor()),
                    ('cv', CountVectorizer()),
                    ('tfidf', TfidfTransformer()),
                ])),
                ('envelope', Pipeline([
                    ('tde', ThreadDictExtractor()),
                    ('dv', DictVectorizer()),
                ])),
            ], transformer_weights={
                'subject': 1,
                'text': 0.7,
                'envelope': 0.7,
            })),
            ('clf', MultinomialNB()),
        ])
    elif p == "pipe2b":
        # Same features and weights as pipe2, but with an MLP classifier.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([
                    ('tse', ThreadSubjectExtractor()),
                    ('cv', CountVectorizer()),
                    ('tfidf', TfidfTransformer()),
                ])),
                ('text', Pipeline([
                    ('tte', ThreadTextExtractor()),
                    ('cv', CountVectorizer()),
                    ('tfidf', TfidfTransformer()),
                ])),
                ('envelope', Pipeline([
                    ('tde', ThreadDictExtractor()),
                    ('dv', DictVectorizer()),
                ])),
            ], transformer_weights={
                'subject': 1,
                'text': 0.7,
                'envelope': 0.7,
            })),
            ('mlc', MLPClassifier()),
        ])
    elif p == "pipe2c":
        # Like pipe2b, but with more weight on the text and less on the envelope.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([
                    ('tse', ThreadSubjectExtractor()),
                    ('cv', CountVectorizer()),
                    ('tfidf', TfidfTransformer()),
                ])),
                ('text', Pipeline([
                    ('tte', ThreadTextExtractor()),
                    ('cv', CountVectorizer()),
                    ('tfidf', TfidfTransformer()),
                ])),
                ('envelope', Pipeline([
                    ('tde', ThreadDictExtractor()),
                    ('dv', DictVectorizer()),
                ])),
            ], transformer_weights={
                'subject': 1,
                'text': 1,
                'envelope': 0.4,
            })),
            ('mlc', MLPClassifier()),
        ])
    else:
        raise ValueError("The pipe %s is not a valid pipe" % p)
    return p
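
# Illustrative (not part of the original flow): build a pipeline without fitting it
# and inspect its steps via scikit-learn's named_steps mapping.
# p = build_pipe("pipe2")
# print(p.named_steps["union"].transformer_list)
# print(p.named_steps["clf"])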


def get_pipe(p="pipe1", k="answered"):
    # Build the named pipeline and fit it on all training threads for key k.
    p = build_pipe(p)
    t_a, labels, le = get_training_threads(k)
    p.fit(t_a, labels)
    # Return the fitted pipeline together with the LabelEncoder for its classes.
    return p, le
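
# Illustrative usage (commented out, in the style of the examples at the end of
# this file): fit a pipeline and print its class index -> label mapping.
# pipe, le = get_pipe("pipe2", "maintopic")
# print_answers(le)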


from sklearn.metrics import accuracy_score


def test_pipe(pp, k):
    # Evaluate one or more pipelines on a held-out split of the training threads.
    t_a, labels, le = get_training_threads(k)
    X_train, X_test, y_train, y_test = train_test_split(t_a, labels, test_size=0.2)
    if not isinstance(pp, list):  # accept a single pipe name as well as a list
        pp = [pp]
    for p in pp:
        print("pipe: %s" % p)
        p = build_pipe(p)
        p.fit(X_train, y_train)
        ypred = p.predict(X_test)
        print(accuracy_score(y_test, ypred))


# pipe1 = get_pipe("pipe1", "answered")
# pipe2 = get_pipe("pipe2", "maintopic")
# pipe2b = get_pipe("pipe2b", "maintopic")
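
# A possible comparison run across all predefined pipelines (commented out like the
# examples above; "maintopic" is one of the keys defined in data_types):
# test_pipe(["pipe1", "pipe2", "pipe2b", "pipe2c"], "maintopic")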