In [1]:
#Prepare our imports and constants.
import time

ONE_WEEK_IN_SECONDS = 7 * 86400
VOTE_SCORE = 432
ARTICLES_PER_PAGE = 25

In [2]:
def article_vote(conn, user, article):
    cutoff = time.time() - ONE_WEEK_IN_SECONDS  #Calculate the cutoff time for voting.
    posted = conn.zscore('time:', article)  #zscore() returns None for unknown articles.
    if posted is None or posted < cutoff:  #Check to see if the article can still be voted on.
        return
    
    article_id = article.partition(':')[-1]  #Get the id portion from the article:id identifier.
    #If the user hasn’t voted for this article before, increment the article score and vote count.
    if conn.sadd('voted:' + article_id, user): 
        conn.zincrby('score:', VOTE_SCORE, article)  #redis-py 3+ takes (name, amount, value).
        conn.hincrby(article, 'votes', 1)
        
        
def post_article(conn, user, title, link):
    article_id = str(conn.incr('article:'))  #Generate a new article id.
    voted = 'voted:' + article_id
    
    #Start with the posting user having voted for the article, and set the
    #article voting information to automatically expire in a week.
    conn.sadd(voted, user)
    conn.expire(voted, ONE_WEEK_IN_SECONDS)
    
    now = time.time()
    article = 'article:' + article_id
    #Create the article hash.
    conn.hset(article, mapping={  #HMSET is deprecated; pass a mapping to hset() instead.
        'title': title,
        'link': link,
        'poster': user,
        'time': now,
        'votes': 1,
    })
    
    #Add the article to the time and score ordered ZSETs.
    conn.zadd('score:', {article: now + VOTE_SCORE})  #redis-py 3+ takes a {member: score} mapping.
    conn.zadd('time:', {article: now})
    
    return article_id


def get_articles(conn, page, order='score:'):
    
    #Set up the start and end indexes for fetching the articles
    start = (page-1) * ARTICLES_PER_PAGE
    end = start + ARTICLES_PER_PAGE - 1
    
    #Fetch the article ids.
    ids = conn.zrevrange(order, start, end)
    
    #Get the article information from the list of article ids.
    articles = []
    for article_id in ids:
        article_data = conn.hgetall(article_id)
        article_data['id'] = article_id
        articles.append(article_data)
    
    return articles

def add_remove_groups(conn, article_id, to_add=(), to_remove=()):  #Immutable defaults avoid the mutable-default pitfall.
    article = 'article:' + article_id  #Construct the article key like we did in post_article.
    for group in to_add:
        #Add the article to groups that it should be a part of.
        conn.sadd('group:' + group, article)
    for group in to_remove:
        #Remove the article from groups that it should be removed from.
        conn.srem('group:' + group, article)
        
        
def get_group_articles(conn, group, page, order='score:'):
    #Create a key for each group and each sort order.
    key = order + group
    #If we haven’t sorted these articles recently, we should sort them.
    if not conn.exists(key):
        #Actually sort the articles in the group based on score or recency
        conn.zinterstore(key,
            ['group:' + group, order],
            aggregate='max',
        )
        conn.expire(key, 60) #Tell Redis to automatically expire the ZSET in 60 seconds.
    
    #Call our earlier get_articles() function to handle pagination and article data fetching.
    return get_articles(conn, page, key)
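
In [ ]:
#A minimal usage sketch, not part of the original listing: it assumes a
#Redis server running on localhost and redis-py installed; the user names
#and URL below are made up for illustration.
import redis

conn = redis.Redis(decode_responses=True)  #Return replies as strings, not bytes.

article_id = post_article(conn, 'user:1', 'A sample title', 'http://example.com')
article_vote(conn, 'user:2', 'article:' + article_id)
conn.hgetall('article:' + article_id)  #The hash now shows 2 votes.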

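In [ ]:
#Pagination sketch, assuming the cell above has run: fetch the first page
#of articles ordered by score.
get_articles(conn, 1)
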
In [ ]:
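#Group sketch, likewise assuming the cells above have run: file the article
#under a hypothetical 'programming' group, then fetch that group's first page.
add_remove_groups(conn, article_id, to_add=['programming'])
get_group_articles(conn, 'programming', 1)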