from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix

import numpy as np
import yaml

from storage import MailThread, db_session

# with open("data.yml", "r") as stream:
#     try:
#         train = yaml.safe_load(stream)
#     except yaml.YAMLError as exc:
#         print(exc)

# In-memory training labels, keyed by thread id; normally loaded from data.yml (see above).
train = {}

# Allowed training keys and the expected type of their values.
data_types = {
    "answered": bool,
    "maintopic": str,
    "lang": str,
}


def set_train_data(i, d, key="answered"):
    """Store a single training label for thread i in the in-memory dict."""
    global train
    if key not in data_types:
        raise ValueError("Key %s unknown" % key)
    if i not in train or train[i] is None:
        train[i] = {}
    if not isinstance(d, data_types[key]):
        raise TypeError("Data %r for key %s must be %s but is %s"
                        % (d, key, data_types[key], type(d)))
    train[i][key] = d


def store_training_data(i, d, key="answered"):
    """Set a training label and persist the whole training dict to data.yml."""
    set_train_data(i, d, key)
    with open("data.yml", "w") as f:
        f.write(yaml.dump(train, default_flow_style=True))


# Load the training threads and their labels for a given key (label/property).
def get_training_threads(key="answered", filters=[]):
    if key not in data_types:
        raise ValueError("Key %s unknown" % key)
    t_a = []  # threads
    d_a = []  # raw labels
    if "db" in filters:
        q = db_session.query(MailThread).filter(MailThread.istrained.is_(True))
        if "de" in filters:
            q = q.filter(MailThread.lang == "de")
        elif "en" in filters:
            q = q.filter(MailThread.lang == "en")
        for t in q.all():
            t_a.append(t)
            if key == "answered":
                d_a.append(t.is_answered())
            elif key == "maintopic":
                d_a.append(t.maintopic)
            elif key == "lang":
                d_a.append(t.lang)
    else:
        raise ValueError('The "db" filter is now required')
    le = LabelEncoder()
    d_a2 = le.fit_transform(d_a)
    return (t_a, d_a2, le)
    # else:
    #     for i in train:
    #         if key in train[i]:  # the relevant key must be present in the training data
    #             t = db_session.query(MailThread).filter(MailThread.firstmail == i).first()
    #             if t is not None:  # the thread must exist in the database
    #                 t_a.append(t)
    #                 d_a.append(train[i][key])


def in_training(i, key="answered"):
    return i in train and key in train[i]


def print_answers(l):
    """Print the numeric id of every class known to the given LabelEncoder."""
    for i, c in enumerate(l.classes_):
        print(str(i) + ": " + str(c))
    return None


class ThreadDictExtractor(BaseEstimator, TransformerMixin):
    """Extract the flat envelope/metadata dict of each thread (input for DictVectorizer)."""

    def fit(self, x, y=None):
        return self

    def transform(self, X, y=None):
        return [t.mail_flat_dict() for t in X]


class ThreadSubjectExtractor(BaseEstimator, TransformerMixin):
    """Extract the subject line of each thread."""

    def fit(self, x, y=None):
        return self

    def transform(self, X, y=None):
        return [t.subject() for t in X]


class ThreadTextExtractor(BaseEstimator, TransformerMixin):
    """Extract the full text of each thread."""

    def fit(self, x, y=None):
        return self

    def transform(self, X, y=None):
        return [t.text() for t in X]


class ThreadFirstTextExtractor(BaseEstimator, TransformerMixin):
    """Extract only the text of the first mail of each thread."""

    def fit(self, x, y=None):
        return self

    def transform(self, X, y=None):
        return [t.text("first") for t in X]


def get_pipe(p="pipe1", k="answered", filters=[]):
    """Build the named pipeline and fit it on the training threads for key k.

    Returns (fitted_pipeline, label_encoder), or (None, None) if no training
    data is available.
    """
    p = build_pipe(p)
    tt = get_training_threads(k, filters)
    if len(tt[0]) > 0:
        p.fit(tt[0], tt[1])
        return p, tt[2]
    else:
        return None, None
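# Example usage (sketch): how get_pipe() and print_answers() might be combined.
# Assumes the database already holds threads flagged as trained; `new_threads`
# stands in for a list of MailThread objects to classify.
#
#     pipe, le = get_pipe("pipe2", "maintopic", filters=["db", "de"])
#     if pipe is not None:
#         predicted = pipe.predict(new_threads)
#         print(le.inverse_transform(predicted))  # map class ids back to labels
#         print_answers(le)                       # list all known class ids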
def test_pipe(pp, k, f=[]):
    """Split the training data, then report classes, accuracy and confusion
    matrix for each of the named pipelines."""
    tt = get_training_threads(k, f)
    X_train, X_test, y_train, y_test = train_test_split(tt[0], tt[1], test_size=0.4)
    if not isinstance(pp, list):
        pp = [pp]
    for name in pp:
        print("pipe: %s" % name)
        p = build_pipe(name)
        p.fit(X_train, y_train)
        ypred = p.predict(X_test)
        print(tt[2].classes_)
        print(accuracy_score(y_test, ypred))
        print(confusion_matrix(y_test, ypred))


def build_pipe(p="pipe1"):
    """Return one of the predefined (unfitted) classification pipelines by name."""
    if p == "pipe1":
        # Envelope/metadata features only, naive Bayes classifier.
        p = Pipeline([('tde', ThreadDictExtractor()),
                      ('dv', DictVectorizer()),
                      ('clf', MultinomialNB())])
    elif p == "pipe2":
        # Subject + full text (tf-idf) + envelope features, naive Bayes classifier.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer()),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer()),
                                   ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1, 'text': 0.7, 'envelope': 0.7})),
            ('clf', MultinomialNB()),
        ])
    elif p == "pipe2b":
        # Same features as pipe2, but with an MLP classifier.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer()),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer()),
                                   ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1, 'text': 0.7, 'envelope': 0.7})),
            ('mlc', MLPClassifier()),
        ])
    elif p == "pipe2d":
        # Adds the text of the first mail as a separate feature block.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer()),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer()),
                                   ('tfidf', TfidfTransformer())])),
                ('firsttext', Pipeline([('tte', ThreadFirstTextExtractor()),
                                        ('cv', CountVectorizer()),
                                        ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1.3, 'text': 1, 'firsttext': 0.9, 'envelope': 0.2})),
            ('mlc', MLPClassifier()),
        ])
    elif p == "pipe2e":
        # Like pipe2d, with two hidden layers of 100 units.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer()),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer()),
                                   ('tfidf', TfidfTransformer())])),
                ('firsttext', Pipeline([('tte', ThreadFirstTextExtractor()),
                                        ('cv', CountVectorizer()),
                                        ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1.3, 'text': 1, 'firsttext': 0.9, 'envelope': 0.2})),
            ('mlc', MLPClassifier(hidden_layer_sizes=(100, 100))),
        ])
    elif p == "pipe2e1":
        # Like pipe2e, with a third hidden layer of 50 units.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer()),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer()),
                                   ('tfidf', TfidfTransformer())])),
                ('firsttext', Pipeline([('tte', ThreadFirstTextExtractor()),
                                        ('cv', CountVectorizer()),
                                        ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1.3, 'text': 1, 'firsttext': 0.9, 'envelope': 0.2})),
            ('mlc', MLPClassifier(hidden_layer_sizes=(100, 100, 50))),
        ])
    elif p == "pipe2f":
        # Like pipe2e, but with bigrams for the first-mail text.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer(ngram_range=(1, 1))),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer(ngram_range=(1, 1))),
                                   ('tfidf', TfidfTransformer())])),
                ('firsttext', Pipeline([('tte', ThreadFirstTextExtractor()),
                                        ('cv', CountVectorizer(ngram_range=(1, 2))),
                                        ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1.3, 'text': 1, 'firsttext': 0.9, 'envelope': 0.2})),
            ('mlc', MLPClassifier(hidden_layer_sizes=(100, 100))),
        ])
    elif p == "pipe2g":
        # Like pipe2f, with three hidden layers of 100 units.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer(ngram_range=(1, 1))),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer(ngram_range=(1, 1))),
                                   ('tfidf', TfidfTransformer())])),
                ('firsttext', Pipeline([('tte', ThreadFirstTextExtractor()),
                                        ('cv', CountVectorizer(ngram_range=(1, 2))),
                                        ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1.3, 'text': 1, 'firsttext': 0.9, 'envelope': 0.2})),
            ('mlc', MLPClassifier(hidden_layer_sizes=(100, 100, 100))),
        ])
    elif p == "pipe2c":
        # Subject + full text + envelope, MLP classifier, lower envelope weight.
        p = Pipeline([
            ('union', FeatureUnion(transformer_list=[
                ('subject', Pipeline([('tse', ThreadSubjectExtractor()),
                                      ('cv', CountVectorizer()),
                                      ('tfidf', TfidfTransformer())])),
                ('text', Pipeline([('tte', ThreadTextExtractor()),
                                   ('cv', CountVectorizer()),
                                   ('tfidf', TfidfTransformer())])),
                ('envelope', Pipeline([('tde', ThreadDictExtractor()),
                                       ('dv', DictVectorizer())])),
            ], transformer_weights={'subject': 1, 'text': 1, 'envelope': 0.4})),
            ('mlc', MLPClassifier()),
        ])
    else:
        raise ValueError("The pipe %s is not a valid pipe" % p)
    return p
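if __name__ == "__main__":
    # Minimal comparison run (sketch): evaluates a few of the pipelines defined
    # above on the German threads in the database. Assumes threads have already
    # been labelled and flagged as trained; otherwise get_training_threads()
    # returns no data and train_test_split will fail.
    test_pipe(["pipe1", "pipe2", "pipe2e", "pipe2f"], "maintopic", ["db", "de"])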