In [4]:
import data_io
from features import FeatureMapper, SimpleTransform
import numpy as np
import pickle
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
import re


from nltk.corpus import stopwords
from nltk import word_tokenize          
from nltk.stem import WordNetLemmatizer 

cachedStopWords = stopwords.words("english") #cache stop words to speed-up removing them.

class LemmaTokenizer(object):
    def __init__(self):
        self.wnl = WordNetLemmatizer()
    def __call__(self, doc):
        return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]

def pre_processor(text):
        return re.sub(r'[^0-9a-zA-Z]+', " ", text).lower()
    
vectorizor = CountVectorizer(max_features=100,tokenizer=LemmaTokenizer(),preprocessor=pre_processor)
def feature_extractor():
    features = [('FullDescription-Bag of Words', 'FullDescription', vectorizor),
                ('Title-Bag of Words', 'Title', vectorizor),
                ('LocationRaw-Bag of Words', 'LocationRaw', vectorizor),
                ('LocationNormalized-Bag of Words', 'LocationNormalized', vectorizor)]
    combined = FeatureMapper(features)
    return combined

def get_pipeline():
    features = feature_extractor()
    steps = [("extract_features", features),
             ("classify", RandomForestRegressor(n_estimators=50, 
                                                verbose=2,
                                                n_jobs=4,
                                                min_samples_split=30,
                                                random_state=3465343))]
    return Pipeline(steps)

In [5]:
print("Reading in the training data")
train = data_io.get_train_df()

print("Extracting features and training model")
classifier = get_pipeline()
classifier.fit(train, train["SalaryNormalized"])

print("Saving the classifier")
data_io.save_model(classifier)


Reading in the training data
Extracting features and training model
building tree 1 of 50
building tree 2 of 50
building tree 3 of 50
building tree 4 of 50
building tree 5 of 50
building tree 6 of 50
building tree 7 of 50
building tree 8 of 50
building tree 9 of 50
building tree 10 of 50
building tree 11 of 50
building tree 12 of 50
building tree 13 of 50
building tree 14 of 50
building tree 15 of 50
building tree 16 of 50
building tree 17 of 50
building tree 18 of 50
building tree 19 of 50
building tree 20 of 50
building tree 21 of 50
building tree 22 of 50
building tree 23 of 50
building tree 24 of 50
building tree 25 of 50
building tree 26 of 50
building tree 27 of 50
building tree 28 of 50
building tree 29 of 50
building tree 30 of 50
building tree 31 of 50
building tree 32 of 50
building tree 33 of 50
building tree 34 of 50
building tree 35 of 50
building tree 36 of 50
building tree 37 of 50
building tree 38 of 50
[Parallel(n_jobs=4)]: Done  33 tasks      | elapsed:  4.4min
building tree 39 of 50
building tree 40 of 50
building tree 41 of 50
building tree 42 of 50
building tree 43 of 50
building tree 44 of 50
building tree 45 of 50
building tree 46 of 50
building tree 47 of 50
building tree 48 of 50
building tree 49 of 50
building tree 50 of 50
[Parallel(n_jobs=4)]: Done  50 out of  50 | elapsed:  6.2min finished
Saving the classifier

In [8]:
print("Making predictions")
valid = data_io.get_valid_df()
predictions = classifier.predict(valid)
predictions = predictions.reshape(len(predictions), 1)

print("Writing predictions to file")
data_io.write_submission(predictions)


Making predictions
[Parallel(n_jobs=4)]: Done  33 tasks      | elapsed:    0.1s
[Parallel(n_jobs=4)]: Done  50 out of  50 | elapsed:    0.1s finished
Writing predictions to file

In [37]:
my_job = valid.loc[valid.Id==13656201]
my_job.Title = "Mathematical epidemiology PostDoctoral Fellow"
my_job.FullDescription = "Mathematical epidemiology. Modelling of infectious diseases. Must be proficient in Matlab, R or Python."
my_job.Company = "University of Warwick"
my_job.LocationRaw = "Coventry"
my_job.LocationNormalized = "Coventry"
predictions = classifier.predict(my_job)
print(predictions)


[ 14961.33308984]
[Parallel(n_jobs=4)]: Done  33 tasks      | elapsed:    0.0s
[Parallel(n_jobs=4)]: Done  50 out of  50 | elapsed:    0.0s finished