In [23]:
# Re-run this to refresh your external functions.
from importlib import reload
import functions; reload(functions)

# Change False to True and run this cell to update your goodproxies.txt.
update_proxies = False
if update_proxies:    
    import pipulate.update_proxies as up
    up.Main()
    
# Send a keyword to the serp function. Always pass a list (as if it were a spreadsheet row).
alist = functions.serp(['Dr Pepper'])
alist


Use proxies: False
Keyword: Dr Pepper
Out[23]:
[]
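
functions.serp takes the whole row rather than a bare string so the same function can later be fed to df.apply. A minimal sketch of that row-wise shape, assuming a requests-based fetch (the real functions.serp is not shown here, and the result parsing is left as a stub):

import requests

def serp(row):
    """Row-wise SERP fetch: row[0] is the keyword cell."""
    keyword = row[0]
    response = requests.get('https://www.google.com/search',
                            params={'q': keyword})
    # Parse the result URLs out of response.text here;
    # an empty list means nothing was scraped.
    return []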

In [24]:
import pipulate as gs
import pandas as pd
sheet_name = 'Search Results'
sheet = gs.name(sheet_name)
tab = sheet.sheet1


---------------------------------------------------------------------------
APIError                                  Traceback (most recent call last)
<ipython-input-24-cbcc92235756> in <module>()
      2 import pandas as pd
      3 sheet_name = 'Search Results'
----> 4 sheet = gs.name(sheet_name)
      5 tab = sheet.sheet1

~\Anaconda3\lib\site-packages\pipulate\__init__.py in name(name)
    145 def name(name):
    146     """Return instance of GSheet by document name"""
--> 147     return oauth().open(name)
    148 
    149 

~\Anaconda3\lib\site-packages\gspread\client.py in open(self, title)
    120             properties = finditem(
    121                 lambda x: x['name'] == title,
--> 122                 self.list_spreadsheet_files()
    123             )
    124 

~\Anaconda3\lib\site-packages\gspread\client.py in list_spreadsheet_files(self)
     94                 params['pageToken'] = page_token
     95 
---> 96             res = self.request('get', url, params=params).json()
     97             files.extend(res['files'])
     98             page_token = res.get('nextPageToken', None)

~\Anaconda3\lib\site-packages\gspread\client.py in request(self, method, endpoint, params, data, json, files, headers)
     77             return response
     78         else:
---> 79             raise APIError(response)
     80 
     81     def list_spreadsheet_files(self):

APIError: {
 "error": {
  "errors": [
   {
    "domain": "global",
    "reason": "insufficientPermissions",
    "message": "Insufficient Permission: Request had insufficient authentication scopes."
   }
  ],
  "code": 403,
  "message": "Insufficient Permission: Request had insufficient authentication scopes."
 }
}
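
The 403 means the cached OAuth token was granted the Sheets scope but not the Drive scope, which gspread needs in order to look a spreadsheet up by name (open() lists your Drive files to find the title). A minimal sketch of re-authorizing with both scopes, assuming service-account credentials (the service_account.json filename is an assumption, and pipulate's own oauth() helper may cache a token you have to delete first):

import gspread
from oauth2client.service_account import ServiceAccountCredentials

scopes = ['https://www.googleapis.com/auth/spreadsheets',
          'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('service_account.json', scopes)
gc = gspread.authorize(creds)
sheet = gc.open('Search Results')  # open-by-name now has the scopes it needs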

In [ ]:
# This applies the serp function in strides, for long-list sanity.
# Find the first empty row in column B so we resume where we left off.
row1 = 1
for i, x in enumerate(tab.col_values(2)):
    if not x:
        row1 = i + 1
        break

rows = (row1, 100)
cols = ('a', 'b')
stride = 1
sheet = gs.name(sheet_name)
tab = sheet.sheet1
cl, df = gs.pipulate(tab, rows, cols)
steps = rows[1] - rows[0] + 1
for i in range(steps):
    row = i % stride
    if not row:  # only act at each stride boundary
        r1 = rows[0] + i
        r2 = r1 + stride - 1
        rtup = (r1, r2)
        print('Cells %s to %s:' % rtup)
        cl, df = gs.pipulate(tab, rtup, cols)
        df['B'] = df.apply(functions.serp, axis=1)
        gs.populate(tab, cl, df)
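
Each pass pulls a stride-sized block of cells into a DataFrame with gs.pipulate, fills column B by applying the serp function row by row, and writes the block back with gs.populate. With stride = 1 every row round-trips individually, which is slow but lets you watch progress and resume after a crash; raising the stride batches more rows per write.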

In [ ]:
# Now we extract, for each keyword, the position of the desired URL.
steps = rows[1] - rows[0] + 1
for i in range(steps):
    row = i % stride
    if not row:  # only act at each stride boundary
        r1 = rows[0] + i
        r2 = r1 + stride - 1
        rtup = (r1, r2)
        print('Cells %s to %s:' % rtup)
        cl, df = gs.pipulate(tab, rtup, cols)
        df['D'] = df.apply(functions.extract_pos, axis=1)
        gs.populate(tab, cl, df)
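
A minimal sketch of what an extract_pos-style helper might look like, assuming column B holds the scraped URLs and the target URL is hard-coded (the real functions.extract_pos, its column layout, and the target_url value are all assumptions):

def extract_pos(row, target_url='www.drpepper.com'):
    """Return the 1-based rank of target_url in the row's results, else None."""
    # Column B may come back as a list or as a whitespace-joined string.
    urls = row[1] if isinstance(row[1], list) else str(row[1]).split()
    for position, url in enumerate(urls, start=1):
        if target_url in url:
            return position
    return None  # target URL not found in the scraped results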