In [1]:
from collections import OrderedDict

import numpy as np
import pandas as pd
from IPython.display import display

from fancy_table import fancy_table



In [2]:
# Note: these helpers read the globals `cities_count` and `distances`
# defined in the cells below.

def tour_cost(tour):
    # Total cost of the closed tour: each city's distance to its left
    # neighbour covers every edge exactly once.
    left_city = tour[range(-1, cities_count - 1)]
    left_pairs = np.array([tour, left_city])
    left_pair_costs = np.array([distances[tuple(p)] for p in left_pairs.T])
    return left_pair_costs.sum()


def city_costs(tour):
    # Per-city cost: distance to the left neighbour plus distance to the
    # right neighbour in the current tour.
    left_city = tour[range(-1, cities_count - 1)]
    right_city = tour[range(-cities_count + 1, 1)]
    left_pairs = np.array([tour, left_city])
    right_pairs = np.array([tour, right_city])
    left_pair_costs = np.array([distances[tuple(p)] for p in left_pairs.T])
    right_pair_costs = np.array([distances[tuple(p)]
                                 for p in right_pairs.T])
    return left_pair_costs + right_pair_costs


def city_delta_costs(tour, proposed_moves):
    # Estimated per-city change in cost if each city moved from position i
    # to position i + proposed_moves[i]: cost to the neighbours of the
    # destination position minus the current neighbour costs.
    pre_costs = city_costs(tour)
    left_city = tour[np.arange(-1, cities_count - 1) + proposed_moves]
    right_city = tour[np.arange(-cities_count + 1, 1) + proposed_moves]
    left_pairs = np.array([tour, left_city])
    right_pairs = np.array([tour, right_city])
    left_pair_costs = np.array([distances[tuple(p)] for p in left_pairs.T])
    right_pair_costs = np.array([distances[tuple(p)]
                                 for p in right_pairs.T])
    return (left_pair_costs + right_pair_costs) - pre_costs

In [3]:
class MovePattern(object):
    """Deterministic move pattern: alternating runs of +magnitude and
    -magnitude, so a position offered +magnitude is paired with the
    position magnitude steps to its right, which is offered -magnitude.
    `shift` rotates where the runs start."""

    def __init__(self, magnitude, shift=0):
        self.magnitude = magnitude
        self.double_magnitude = 2 * magnitude
        self.shift = shift

    def __getitem__(self, i):
        if self.magnitude == 0:
            return 0
        # Offset the index by the shift, then return +magnitude for the
        # first half of each period of length 2 * magnitude and -magnitude
        # for the second half.
        index = (i + 2 * self.double_magnitude -
                 ((self.shift + self.magnitude + 1) % self.double_magnitude))
        if (index % self.double_magnitude) < self.magnitude:
            return self.magnitude
        else:
            return -self.magnitude


def random_pattern_params(max_magnitude, extent, non_zero=True):
    # Draw random (magnitude, shift) parameters for a MovePattern over
    # `extent` positions, optionally rejecting a zero magnitude.
    max_magnitude = min(extent - 1, max_magnitude)
    magnitude = np.random.randint(0, max_magnitude + 1)
    while non_zero and magnitude == 0:
        magnitude = np.random.randint(0, max_magnitude + 1)

    shift = 0

    if magnitude > 0:
        # The shift only needs to span one period of the pattern; restrict
        # it further when the extent cannot hold a full period.
        max_shift = 2 * magnitude - 1
        if extent <= 2 * magnitude:
            max_shift = magnitude - 1
        shift = np.random.randint(max_shift + 1)

    return magnitude, shift
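
To make the pattern concrete, here is a small illustration (a sketch added for clarity, not a cell from the original notebook; the magnitude and shift are assumed values, not the randomly drawn ones used below). The pattern alternates runs of +magnitude and -magnitude, so a position offered +m is paired with the position m steps to its right, which is offered -m; the shift only rotates where the runs start.

# Sketch with assumed parameters: magnitude 2 and shift 0 pair positions
# (0, 2), (3, 5), (4, 6), ...
pattern = MovePattern(2, shift=0)
print([pattern[i] for i in range(8)])
# -> [2, -2, -2, 2, 2, -2, -2, 2]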

Algorithm summary

Implementation


In [4]:
seed = 1314

np.random.seed(seed)

cities_count = 5
# Enumerate the distinct unordered city pairs and give each a random
# integer distance.
distinct_pairs = [(i, j)
                  for i in xrange(cities_count)
                  for j in xrange(i + 1, cities_count)]
distances_dict = OrderedDict(zip(distinct_pairs,
                                 np.random.randint(0, 100,
                                                   cities_count *
                                                   (cities_count - 1) / 2)))
display(distances_dict.items())
# Fill the upper triangle of the distance matrix, then mirror it into the
# lower triangle so that the matrix is symmetric.
distances = np.zeros((cities_count, cities_count), dtype=int)
# distances[map(list, distances_dict.keys())] = distances_dict.values()
distances[zip(*distances_dict.keys())] = distances_dict.values()
distances.T[zip(*distances_dict.keys())] = distances_dict.values()
distances


[((0, 1), 69),
 ((0, 2), 27),
 ((0, 3), 40),
 ((0, 4), 76),
 ((1, 2), 69),
 ((1, 3), 82),
 ((1, 4), 32),
 ((2, 3), 15),
 ((2, 4), 68),
 ((3, 4), 39)]
Out[4]:
array([[ 0, 69, 27, 40, 76],
       [69,  0, 69, 82, 32],
       [27, 69,  0, 15, 68],
       [40, 82, 15,  0, 39],
       [76, 32, 68, 39,  0]])

Create random tour


In [5]:
tsp_data = pd.DataFrame(np.arange(cities_count), columns=['tour-city'])
np.random.shuffle(tsp_data['tour-city'])

Identify left and right neighbour of each tour city


In [6]:
tsp_data['left-city'] = tsp_data['tour-city'].values[range(-1, cities_count - 1)]
tsp_data['right-city'] = tsp_data['tour-city'].values[range(-cities_count + 1, 1)]

Look-up cost to left and right neighbour for each tour city


In [7]:
tsp_data['left-cost'] = [distances[v['tour-city'], v['left-city']]
                         for k, v in tsp_data.iterrows()]
tsp_data['right-cost'] = [distances[v['tour-city'], v['right-city']]
                          for k, v in tsp_data.iterrows()]

In [8]:
tour_cost(tsp_data['tour-city'].values)


Out[8]:
249
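
As a quick consistency check (a sketch, not a cell from the original notebook): city_costs charges every tour edge to both of its endpoint cities, so its entries sum to exactly twice the tour cost.

# Sketch: per-city costs count each edge twice, so they sum to 2 * 249 = 498.
print(city_costs(tsp_data['tour-city'].values).sum())  # -> 498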

Propose swaps between cities in the tour


In [9]:
pattern_params = random_pattern_params(cities_count - 1, cities_count - 1)
move_pattern = MovePattern(*pattern_params)
proposed_moves = [move_pattern[i] for i in xrange(cities_count)]
# Cancel any proposed move that would land outside the tour; those cities
# stay where they are.
proposed_moves = [m if i + m >= 0 and i + m < cities_count else 0
                  for i, m in enumerate(proposed_moves)]
proposed_moves


Out[9]:
[3, 3, 0, -3, -3]
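
Reading these moves as swaps (a sketch added for clarity, not a cell from the original notebook): each position with a positive move is paired with the position it moves to, so here positions 0 and 3 swap, positions 1 and 4 swap, and position 2 stays put.

# Sketch: positions paired by the proposed moves (i with i + move, for move > 0).
print([(i, i + m) for i, m in enumerate(proposed_moves) if m > 0])
# -> [(0, 3), (1, 4)]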

Assign a common identifier to the cities belonging to each swap


In [10]:
# Each swap is identified by the tour position of its positively-moved
# member; cities that are not part of any swap get -1.
tsp_data['swap-id'] = np.array(
    [i if m > 0 else i + m if m < 0 else -1
     for i, m in enumerate(proposed_moves)])

Calculate estimated difference in cost for each swap


In [11]:
sorted_by_move_pair = np.argsort(tsp_data['swap-id'].values)
min_index = np.where(
    tsp_data['swap-id'].values[sorted_by_move_pair] >= 0)[0][0]
min_index


Out[11]:
1

In [12]:
tsp_data['swap-id'].values[sorted_by_move_pair]


Out[12]:
array([-1,  0,  0,  1,  1])
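
Sorting by swap-id groups the two member cities of each swap next to each other, with the unswapped cities (swap-id -1) first; striding by two from min_index therefore picks one member of each swap, and the stride offset by one picks the other. A small sketch (not a cell from the original notebook):

# Sketch: tour positions of the member cities of each swap after sorting.
print(sorted_by_move_pair[min_index::2])      # one member per swap
print(sorted_by_move_pair[min_index + 1::2])  # the other member per swap
# Together these cover positions {0, 3} for swap 0 and {1, 4} for swap 1.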

In [13]:
tsp_data['delta-costs'] = city_delta_costs(tsp_data['tour-city'].values,
                                           proposed_moves)

In [14]:
tsp_data['swap-deltas'] = np.zeros_like(tsp_data['delta-costs'])

In [15]:
move_pair_ids = (
    tsp_data['swap-id'].values[sorted_by_move_pair[min_index::2]])

In [16]:
# Sum the two member cities' deltas of each swap, then broadcast each
# swap's total back onto both of its members via swap-id.  (Cities with
# swap-id == -1 pick up the last swap's value here, but they are discarded
# by the assessment below.)
swap_deltas = (
    tsp_data['delta-costs'].values[sorted_by_move_pair[min_index::2]] +
    tsp_data['delta-costs'].values[sorted_by_move_pair[min_index + 1::2]])
tsp_data['swap-deltas'] = (
    swap_deltas[tsp_data['swap-id'].values])

Assess each swap (currently greedy)


In [17]:
# Greedy assessment: accept a swap if its estimated delta is non-positive;
# cities that are not part of any swap are never accepted.
tsp_data['assessment'] = ((tsp_data['swap-deltas'].values <= 0) &
                          (tsp_data['swap-id'] >= 0))

Apply accepted swaps to produce new tour


In [18]:
# Build the new tour: for accepted swaps take the city from the partner
# position (i + move); otherwise keep the city currently at position i.
tsp_data['new-city'] = (tsp_data['tour-city'].values
                        [np.arange(cities_count) *
                         ~tsp_data['assessment'].values
                         + (np.arange(cities_count) + proposed_moves) *
                         tsp_data['assessment'].values])
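
A quick check on the result (a sketch, not a cell from the original notebook): only the swap of positions 0 and 3 is accepted, and for this non-adjacent pair the estimated delta matches the true change, so the new tour should cost 249 - 42 = 207.

# Sketch: the new tour and its cost after applying the accepted swap.
print(tsp_data['new-city'].values)             # -> [3 4 1 2 0]
print(tour_cost(tsp_data['new-city'].values))  # -> 207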

In [19]:
proposed_moves


Out[19]:
[3, 3, 0, -3, -3]

In [20]:
# Note: This "fancy-table" is taken from [here][1].  Filtering doesn't seem to work on
# the table here... perhaps filtering only works on text columns?
#
# [1]: http://www.snip2code.com/Snippet/47354/Fancy-Table-Using-IPython-Widgets
fancy_table(tsp_data, start_cols=['tour-city'])


   tour-city  left-city  right-city  left-cost  right-cost  swap-id  delta-costs  swap-deltas  assessment  new-city
0          2          0           4         27          68        0            1          -42        True         3
1          4          2           1         68          32        1            7           36       False         4
2          1          4           3         32          82       -1            0           36       False         1
3          3          1           0         82          40        0          -43          -42        True         2
4          0          3           2         40          27        1           29           36       False         0

5 rows × 10 columns

Algorithm summary