How to improve accuracy of classification

I am trying to build a model that can achieve accuracy higher than 90%. I have around 6000 records in my training sample; in the test set I am evaluating results for approximately 250 records.

The training-sample documents have been labeled with categories; the current data contains 9 categories in total. Below is the distribution of categories in the training data:
Category 1 6% Category 2 12% Category 3 28% Category 4 0% Category 5 4% Category 6 9% Category 7 1% Category 8 26% Category 9 15%

I have written the script below, which uses the available description field to create a corpus; this corpus is then used to train a multinomial naive Bayes classifier. I used this trained model to predict the categories of the test data set, but was only able to achieve accuracy close to 67%. I wanted to ask what options might help improve accuracy, or what alternative approaches could achieve accuracy above 90%.

import os
import pandas as pd       
from bs4 import BeautifulSoup
import re
import nltk
from nltk.corpus import stopwords # Import the stop word list
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.grid_search import GridSearchCV
import pickle

##Read history data file
# Historical (labeled) records used for training.
# encoding='cp1252' suggests the CSV was exported from a Windows tool -- TODO confirm.
train = pd.read_csv("Past.csv",encoding='cp1252')

##Text cleaning: strip HTML, keep only letters, drop stop words, stem.
stemmer = SnowballStemmer('english')

# Build the English stop-word set once at import time -- the original
# rebuilt it on every call, which was pure per-document overhead.
_STOPWORDS = set(stopwords.words("english"))

def Description_to_words(raw_Description):
    """Normalize one raw description into a space-joined string of stemmed tokens.

    Steps: strip HTML tags, drop all non-letter characters, lowercase and
    tokenize, remove English stop words, then Snowball-stem each token.
    """
    # 1. Remove HTML. An explicit parser avoids bs4's "no parser specified"
    # warning and keeps parsing reproducible across machines.
    Description_text = BeautifulSoup(raw_Description, "html.parser").get_text()
    # 2. Remove non-letters (digits, punctuation, etc.).
    letters_only = re.sub("[^a-zA-Z]", " ", Description_text)
    # 3. Convert to lower case and tokenize.
    words = word_tokenize(letters_only.lower())
    # 4. Remove stop words.
    meaningful_words = [w for w in words if w not in _STOPWORDS]
    # 5. Stem the remaining meaningful words.
    return " ".join(stemmer.stem(w) for w in meaningful_words)

# Clean every training description, logging progress every 1000 rows.
num_Descriptions = train["Description"].size
clean_train_Descriptions = []
# Iterate the Series values directly instead of indexing with
# train["Description"][i]: label-based lookup silently breaks if the
# DataFrame index is not the default 0..n-1 range.
for i, raw_Description in enumerate(train["Description"], start=1):
    if i % 1000 == 0:
        print("Description %d of %d\n" % (i, num_Descriptions))
    clean_train_Descriptions.append(Description_to_words(raw_Description))

# Build TF-IDF features over 1-4 grams; cap the vocabulary at 5000 terms
# and drop terms seen in fewer than 3 documents to reduce noise.
vectorizer = TfidfVectorizer(
    analyzer="word",
    tokenizer=None,
    preprocessor=None,
    stop_words=None,
    max_features=5000,
    ngram_range=(1, 4),
    min_df=3,
)
train_data_features = vectorizer.fit_transform(clean_train_Descriptions)

# NOTE(review): MultinomialNB accepts sparse input directly, so this
# densification only costs memory; kept for compatibility downstream.
train_data_features = train_data_features.toarray()

# BUG FIX: the original line `nb = MultinomialNB(), train["Category"] )` was
# a broken paste (syntax error) and never fitted the classifier. Fit the
# multinomial naive Bayes model on the TF-IDF features and labels.
nb = MultinomialNB().fit(train_data_features, train["Category"])

# Read the test data
test = pd.read_csv("C:/Assignment/Assignment-Group-Prediction/IPM/Future.csv",encoding='cp1252')

# Clean the test descriptions with the exact same pipeline used for training.
num_Descriptions = len(test["Description"])
clean_test_Descriptions = []
print("Cleaning and parsing the test set movie Descriptions...\n")
# Same idiom fix as the training loop: iterate values, don't index by position.
for i, raw_Description in enumerate(test["Description"], start=1):
    if i % 1000 == 0:
        print("Description %d of %d\n" % (i, num_Descriptions))
    clean_test_Descriptions.append(Description_to_words(raw_Description))

# Reuse the already-fitted vectorizer: transform only -- never refit on test
# data, or train/test feature spaces would diverge.
test_data_features = vectorizer.transform(clean_test_Descriptions)
test_data_features = test_data_features.toarray()
resultNB = nb.predict(test_data_features)

# Assemble the predictions next to the ground-truth labels so the two
# columns can be compared directly in the output file.
output = pd.DataFrame(
    {
        "id": test["TicketNum"],
        "Category": test["Category"],
        "CategoryNB": resultNB,
    }
)

# Write a comma-separated results file; quoting=3 is csv.QUOTE_NONE,
# i.e. fields are written without surrounding quotes.
output.to_csv("Results.csv", index=False, quoting=3)