In [ ]:
# Copyright 2020 Google LLC

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     https://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

DV360 Automation Workshop: notebook

Author: Matt Lynam

Objective

Enable Display & Video 360 (DV360) advertisers to increase workflow efficiency by utilising the right automation solution according to their needs, resources and technical capability.

Goals

  • Provide an overview of the current automation suite available in DV360
  • Demonstrate the capabilities and limitations of DV360's UI and APIs
  • Explore common advertiser use cases and pitfalls
  • Acquire hands-on experience by applying key concepts using a fictional case study

0) Setup and authentication

Google Colab primer

Google Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with:

  • Zero configuration required
  • Free access to GPUs
  • Easy sharing & collaboration

A notebook is a list of cells, containing either explanatory text or executable code and its output. This is a text cell.

Useful Colab tips

  • Double-click within the cell to edit
  • Code cells can be executed by clicking the Play icon in the left gutter of the cell, or with Cmd/Ctrl + Enter to run the cell in place
  • Use Cmd/Ctrl + / to comment out a line of code

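For example, here's a minimal code cell you can run; the output appears directly beneath the cell:


In [ ]:
# A trivial code cell: run it with Cmd/Ctrl + Enter
seconds_in_a_day = 24 * 60 * 60
print(f"There are {seconds_in_a_day} seconds in a day")
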
0.1 Install Python client libraries

Run the following block to install the latest Google Python Client Library and import additional libraries used for this workshop.


In [ ]:
!pip install google-api-python-client
!pip install google-cloud-vision

import io
import datetime
import json
import csv
import pprint
import requests
import pandas as pd

from google.colab import files
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
from googleapiclient import http
from google.api_core import retry
from google.cloud import vision
from google.cloud import storage

print("Successfully imported Python libraries!")

0.2 Setup your GCP project

To utilise the DV360 API, you need a Google Cloud project. For the purpose of this workshop, we've done this for you, but normally you'd have to complete the following steps before you can make requests using the DV360 API:

  1. Select or create a Google Cloud Platform project.

  2. Enable billing on your project.

  3. Enable the 'Display & Video 360' and 'DoubleClick Bid Manager' APIs from the API library

Create GCP credentials

We've also generated credentials for you, but if you needed to generate new credentials, this would be the process:

  1. Go to the API credentials page in the Cloud Platform Console.
  2. Fill out the required fields on the OAuth consent screen.
  3. On the credentials page, click Create credentials >> OAuth client ID.

  4. Select Other as the application type, and then click Create.
  5. Download the credentials by clicking the Download JSON button

Reference: https://developers.google.com/display-video/api/guides/how-tos/authorizing

0.3 Authentication

Next, we'll authorise the application to submit API requests on our behalf using OAuth.

The following scopes are specified in an array:

Reference: https://developers.google.com/display-video/api/guides/how-tos/authorizing


In [ ]:
API_SCOPES = ['https://www.googleapis.com/auth/doubleclickbidmanager',
            'https://www.googleapis.com/auth/display-video',
            'https://www.googleapis.com/auth/devstorage.read_only',
            'https://www.googleapis.com/auth/cloud-vision']

# Authenticate using user credentials stored in client_secrets.json
client_secrets_file = files.upload()
client_secrets_json = json.loads(next(iter(client_secrets_file.values())))

flow = InstalledAppFlow.from_client_config(client_secrets_json, API_SCOPES)
credentials = flow.run_console()
print("Success!")

# Build DBM Read API service object
dbm_service = discovery.build('doubleclickbidmanager', 'v1.1', credentials=credentials)
print("DBM API service object created")

# Build Google Cloud Storage Read API service object
gcs_service = discovery.build('storage', 'v1', credentials=credentials)
print("GCS service object created")

# Create Display Video API service object
display_video_service = discovery.build('displayvideo', 'v1', credentials=credentials)
print("Display Video API service object created")

0.4 Set DV360 account settings

Next, we need to set our DV360 parameters, and generate a sandbox (test) campaign.

Note, if you'd prefer to use an existing campaign, update CAMPAIGN_ID below.


In [ ]:
PARTNER_ID = "234340" #@param {type:"string"}
ADVERTISER_ID = "2436036" #@param {type:"string"}
CAMPAIGN_ID = "4258803" #@param {type:"string"}

# For use with legacy DBM API
SDF_VERSION = "5.1" #@param {type:"string"}

# For use with DV360 API
SDF_VERSION_DV360 = "SDF_VERSION_5_1" #@param {type:"string"}

print("DV360 settings saved!")

Create a new 'sandbox' campaign to use with the rest of the exercises

Executing the following code block will overwrite any CAMPAIGN_ID used above.


In [ ]:
YOUR_NAME = "Matt" #@param {type:"string"}

# Set dates for new campaign
today = datetime.datetime.today()
today_plus30 = today + datetime.timedelta(days=30)

month, day, year = today.strftime('%m'), today.strftime('%d'), today.strftime('%Y')
month_plus30 = today_plus30.strftime('%m')
day_plus30 = today_plus30.strftime('%d')
year_plus30 = today_plus30.strftime('%Y')

def create_campaign(YOUR_NAME):
  """ Creates a new DV360 Campaign object."""
  
  campaign_name = f"{year}-{month}-{day} | {YOUR_NAME}"
  campaign_obj = {
    'displayName': campaign_name,
    'entityStatus': 'ENTITY_STATUS_ACTIVE',
    'campaignGoal': {
        'campaignGoalType' : 'CAMPAIGN_GOAL_TYPE_ONLINE_ACTION',
        'performanceGoal': {
            'performanceGoalType': 'PERFORMANCE_GOAL_TYPE_CPC',
            'performanceGoalAmountMicros': 1000000
        }
    },
    'campaignFlight': {
        'plannedSpendAmountMicros': 1000000,
        'plannedDates': {
            'startDate': {
                'year': year,
                'month': month,
                'day': day
            },
            'endDate': {
                'year': year_plus30,
                'month': month_plus30,
                'day': day_plus30
            }
        }
    },
    'frequencyCap': {
        'maxImpressions': 10,
        'timeUnit': 'TIME_UNIT_DAYS',
        'timeUnitCount': 1
    }
  }

  # Create the campaign.
  campaign = display_video_service.advertisers().campaigns().create(
    advertiserId=ADVERTISER_ID,
    body=campaign_obj
  ).execute()
  
  return campaign

campaign = create_campaign(YOUR_NAME)

# Display the new campaign.
CAMPAIGN_ID = campaign['campaignId']
print(f"\nCampaign '{campaign['name']}' was created."
      f"\nCampaign id: '{campaign['campaignId']}'"
      f"\nCampaign name: '{campaign['displayName']}'"
      f"\nCampaign status: '{campaign['entityStatus']}'")

1A) SDF using DBM API

What are Structured Data Files (SDF)? SDF are a way of using spreadsheets to make bulk changes to DV360 entities, including Campaigns, Insertion Orders, Line Items, TrueView Ad Groups, TrueView Ads and deals. SDF are the first step on the path to full automation in DV360, but only allow you to automate so far, as we'll explore now...


Note: as part of the Display & Video 360 API effort, the SDF resource (sdf.download) is migrating to a new endpoint (displayvideo.googleapis.com). The current endpoint (https://www.googleapis.com/doubleclickbidmanager/) will soon be sunset (anticipated Sept 2020). If you wish to use the legacy SDF resource (DBM API) you should follow track 1A, otherwise follow track 1B for examples using the DV360 API.
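
To make the format concrete, here's an illustrative sketch (invented values, not real campaign data) of what a couple of rows of an SDF Line Items file look like once loaded into a pandas DataFrame; the column names match those used in the exercises below:


In [ ]:
import pandas as pd

# Illustrative only -- invented values; real SDF files contain many more columns
sdf_sketch = pd.DataFrame([
    {'Line Item Id': 12345, 'Name': 'GB | Pixel 4 | GA01187-GB',
     'Geography Targeting - Include': '2826;',
     'Keyword Targeting - Include': 'mobile phone;smartphone;'},
    {'Line Item Id': 12346, 'Name': 'FR | Nest Mini | GA00781-FR',
     'Geography Targeting - Include': '2250;',
     'Keyword Targeting - Include': 'smart speaker;'},
])
sdf_sketch.head()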

1.1 Manually create SDF

  1. Create a copy of the Google Store product feed
  2. Update the highlighted cells (B2:B3) on the tab called "sdf_insertionorders"
  3. Save the updated "sdf_insertionorders" tab and "sdf_lineitems" tab to .CSV (File >> Download >> CSV)
  4. Upload the two .CSV files together in the DV360 UI

This will create a very basic campaign, with 2 insertion orders, and 10 lineitems per insertion order.

1.2 Editing SDF programmatically

Our new LineItems are missing some important targeting and inventory controls:

  • Channels (e.g. groups of publisher URLs)
  • Inventory source
  • Brand safety
  • Geo targeting

Let’s use software to make these changes for us...


In [ ]:
# Configure the sdf.download request
request_body={
    "fileTypes": ["LINE_ITEM"],
    "filterType": "CAMPAIGN_ID",
    "filterIds" : [CAMPAIGN_ID],
    "version": SDF_VERSION
}

# Make the request to download all SDF LineItems for your new campaign
request = dbm_service.sdf().download(body=request_body)
response = request.execute()

# Load SDF response to Pandas DataFrame
sdf_df = pd.read_csv(io.StringIO(response['lineItems']))

# Show sample (5 rows) of DataFrame
sdf_df.head()

Define a boilerplate targeting template that all Line Items should adhere to


In [ ]:
targeting_template = {
  'Channel Targeting - Include': '2580510;',
  'Channel Targeting - Exclude': '2580509;',
  'Inventory Source Targeting - Include': '1;',
  'Inventory Source Targeting - Exclude': '6; 8; 9; 10; 2; 11; 12; 13; 16; 20; '
    +'23; 27; 29; 30; 31; 34; 35; 36; 38; 43; 46; 50; 51; 56; 60; 63; 67; 74;',
  'Digital Content Labels - Exclude': 'G; PG; T;',
  'Brand Safety Sensitivity Setting': 'Use custom',
  'Brand Safety Custom Settings': "Adult; Alcohol; Derogatory; "
      + "Downloads & Sharing; Drugs; Gambling; Politics; Profanity; Religion; "
      + "Sensitive social issues; Suggestive; Tobacco; Tragedy; "
      + "Transportation Accidents; Violence; Weapons;"
  }

Modify latest SDF LineItems file and update the columns according to the targeting template


In [ ]:
# Overwrite targeting columns using 'targeting_template'
sdf_df['Channel Targeting - Include'] = targeting_template['Channel Targeting - Include']
sdf_df['Channel Targeting - Exclude'] = targeting_template['Channel Targeting - Exclude']
sdf_df['Inventory Source Targeting - Include'] = targeting_template['Inventory Source Targeting - Include']
sdf_df['Inventory Source Targeting - Exclude'] = targeting_template['Inventory Source Targeting - Exclude']
sdf_df['Digital Content Labels - Exclude'] = targeting_template['Digital Content Labels - Exclude']
sdf_df['Brand Safety Sensitivity Setting'] = targeting_template['Brand Safety Sensitivity Setting']
sdf_df['Brand Safety Custom Settings'] = targeting_template['Brand Safety Custom Settings']

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update1_controls.csv', index=False)

# Show sample (5 rows) of DataFrame
sdf_df.head()

In [ ]:
# Download modified csv to local storage
files.download('sdf_update1_controls.csv')

print("Success, check your downloads folder for a file called 'sdf_update1_controls.csv'")

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.3 SDF + Entity Read Files

What are Entity Read Files (ERFs)? ERFs are flat files (.JSON) in Google Cloud Storage that contain lookup values for DV360 entities like geographies, creatives, etc. Each DV360 entity (Advertiser, Campaign, LineItem, etc) has a corresponding .JSON file in Cloud Storage retained free-of-charge for 60 days from their processing date.

ERFs consist of 1 file per entity type, written once per day to two separate Cloud buckets:

  1. Public (10 .JSON files) - contain common public data such as GeoLocation and Language which are stored in the gdbm-public bucket (the same bucket for every DV360 user).
  2. Private (13 .JSON files) - contain information about the DV360 Partner's campaigns, creatives, budgets and other private data and are stored in Partner-specific buckets (restricted to specific users)

Reference: https://developers.google.com/bid-manager/guides/entity-read/overview

ERFs can be used to speed up, and automate, the creation of SDF files.
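
As a sketch of what to expect (field values invented for illustration), a single GeoLocation record is a dictionary; the exercises below rely on its 'id', 'canonical_name' and 'country_code' fields:


In [ ]:
# Illustrative GeoLocation entry (values invented for demonstration);
# the code below reads 'id', 'canonical_name' and 'country_code'
example_geolocation = {
    'id': 2826,
    'canonical_name': 'United Kingdom',
    'country_code': 'GB',
}
print(example_geolocation)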

Let's explore this now...

Download yesterday's GeoLocation.json from public ERF bucket using Google Cloud Storage API


In [ ]:
yesterday = datetime.date.today() - datetime.timedelta(2) # Actually today-2 to avoid issues with collection

# Download public ERF for geolocation info
request = gcs_service.objects().get_media(
    bucket="gdbm-public",
    object='entity/' + yesterday.strftime('%Y%m%d') + '.0.GeoLocation.json')

response = request.execute()
geolocations = json.loads(response)

print("GeoLocation.json successfully downloaded \n")
print("Here's a sample of the first 5 entries:\n")
pprint.pprint(geolocations[0:5])

Retrieve a list of country codes / IDs from GeoLocation.json for each of our store locations


In [ ]:
# Provide a list of store locations
store_locations = ['United Kingdom', 'France', 'Spain', 'Germany', 'Portugal']

# Create a new dictionary to save the country code and ID later on
geo_targeting_ids = {}

# Note: GeoLocation.json is over 800,000 lines
for location in geolocations:
  if location['canonical_name'] in store_locations:
    geo_targeting_ids[location['country_code']] = location['id']
    print(location)
    
print(geo_targeting_ids)

Download the latest SDF LineItems (because we've made changes since our last download)


In [ ]:
# Configure the sdf.download request
request_body={
    "fileTypes": ["LINE_ITEM"],
    "filterType": "CAMPAIGN_ID",
    "filterIds" : [CAMPAIGN_ID],
    "version": SDF_VERSION
}

# Make the request to download all SDF LineItems for your new campaign
request = dbm_service.sdf().download(body=request_body)
response = request.execute()

# Load SDF response to Pandas DataFrame
sdf_df = pd.read_csv(io.StringIO(response['lineItems']))

# Show sample (5 rows) of DataFrame
sdf_df.head()

Modify the contents of the latest SDF output, then save a new CSV with updated Geo Targeting IDs


In [ ]:
for country in geo_targeting_ids:
  target_country = geo_targeting_ids[country]
  sdf_df.loc[sdf_df.Name.str.contains(country), 'Geography Targeting - Include'] = f'{target_country};'

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update2_geo.csv', index=False)

# Display updated DataFrame
sdf_df.head()

In [ ]:
# Download modified csv to local storage
files.download('sdf_update2_geo.csv')

print("Success, look for the file called 'sdf_update2_geo.csv' in your downloads folder")

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.4 SDF + Cloud Vision API

Next, let's look at how you can utilise external APIs.

  1. Download the 'product_feed' tab from Google Store as CSV (File >> Download >> CSV)
  2. Execute the following code block and upload 'product_feed.csv'
  3. This will create a new Python dictionary (key:value pairing), mapping SKUs with their image link

Warning: the Cloud Vision API is a paid product; utilising the following example in your own Cloud project will incur costs.

Try out the Cloud Vision API for free at cloud.google.com/vision


In [ ]:
# Upload product feed using Colab's upload utility
product_feed_csv = files.upload()

contents = next(iter(product_feed_csv.values())).decode('utf-8')
products = csv.DictReader(io.StringIO(contents))

image_url_list = {}

# Iterate through each row and update dict() with sku:link
for row in products:
  image_url_list[row['sku']] = row['image_link']

pprint.pprint(image_url_list)

Define a function to send images to the Cloud Vision API


In [ ]:
def vision_analysis(image_url):
  """Process images using the Cloud Vision API"""
  
  # Assign image URL
  image = vision.types.Image()
  image.source.image_uri = image_url

  # Instantiates a Vision client
  client = vision.ImageAnnotatorClient(credentials=credentials)

  # Performs label detection on the image file
  response = client.label_detection(image=image)
  labels = response.label_annotations
  
  targeting_keywords = []

  for label in labels:
    targeting_keywords.append(label.description)
  
  return targeting_keywords

Run our images through the function, and return a lookup table


In [ ]:
imageslookup = {}

for sku, url in image_url_list.items():
    imageslookup[sku] = vision_analysis(url)
    print(f"Vision analysis completed for: {url}")

print("\nCreating lookup table:")
pprint.pprint(imageslookup, indent=4)

Now that we have our new labels from the Vision API, we need to write these into the keyword targeting field


In [ ]:
# Configure the sdf.download request
request_body={
    "fileTypes": ["LINE_ITEM"],
    "filterType": "CAMPAIGN_ID",
    "filterIds" : [CAMPAIGN_ID],
    "version": SDF_VERSION
}

request = dbm_service.sdf().download(body=request_body)
response = request.execute()

# Load SDF response to Pandas DataFrame
sdf_df = pd.read_csv(io.StringIO(response['lineItems']))

for product in imageslookup:
  sdf_df.loc[sdf_df.Name.str.contains(product), \
      'Keyword Targeting - Include'] = ";".join(imageslookup[product]).lower()

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update3_keywords.csv', index=False)

# Show sample (5 rows) of DataFrame
sdf_df.head()

In [ ]:
# Download modified csv to local storage
files.download('sdf_update3_keywords.csv')

print("Success, look for the file called 'sdf_update3_keywords.csv' in your downloads folder")

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.5 Optimisation using Reports

Next, we'll look at how you could combine reporting data with operations such as optimising bid multipliers or deactivating activity. Note: your new campaign has no performance history, so we'll use an existing campaign for this exercise.


In [ ]:
# Define DV360 report definition (i.e. metrics and filters)
report_definition = {
  'params': {
    'type': 'TYPE_GENERAL',
    'metrics': [
      'METRIC_IMPRESSIONS',
      'METRIC_CLICKS',
      'METRIC_CTR',
      'METRIC_REVENUE_ADVERTISER'
    ],
    'groupBys': [
      'FILTER_ADVERTISER',
      'FILTER_INSERTION_ORDER',
      'FILTER_LINE_ITEM',
      'FILTER_ADVERTISER_CURRENCY'
    ],
    'filters': [
      {
        'type': 'FILTER_ADVERTISER', 
        'value': ADVERTISER_ID
      }
    ],
  },
  "metadata": {
    "title": "DV360 Automation API-generated report",
    "dataRange": 'LAST_90_DAYS',
    "format": "csv"
  },
  "schedule": {
    'frequency': "ONE_TIME"
  }
}

# Create new query using report definition
operation = dbm_service.queries().createquery(body=report_definition).execute()
pprint.pprint(operation)

# Runs the given Queries.getquery request, retrying with an exponential
# backoff. Returns completed operation. Will raise an exception if the
# operation takes more than five hours to complete.
@retry.Retry(predicate=retry.if_exception_type(Exception), initial=5, maximum=60, deadline=18000)
def check_get_query_completion(getquery_request):
  response = getquery_request.execute()
  pprint.pprint(response)
  if response['metadata']['running']:
    raise Exception("The operation has not completed.")
  return response

getquery_request = dbm_service.queries().getquery(queryId=operation['queryId'])
response = check_get_query_completion(getquery_request)

In [ ]:
report_url = response['metadata']['googleCloudStoragePathForLatestReport']

# Use skipfooter to remove report footer from data
report_df = pd.read_csv(report_url, skipfooter=16, engine='python')
report_df.head(10)

In [ ]:
# Define our 'KPIs'
ctr_target = 0.15
imp_threshold = 10000

# Convert IDs to integers to remove the decimal point
report_df['Line Item ID'] = report_df['Line Item ID'].apply(int)
poor_performers = report_df.query('Impressions > @imp_threshold & (Clicks / Impressions)*100 < @ctr_target')

# Convert results to Python list
poor_performers = list(poor_performers['Line Item ID'])

print(f"There are {len(poor_performers)} LineItems with a CTR"
      f" < {ctr_target}% and over {imp_threshold} impressions:"
      f"\n{poor_performers}")

Download an updated SDF LineItems file, and if the LineItem ID is in the poor performers list, add a Geo bid multiplier to halve the bids (0.5)


In [ ]:
# Configure the sdf.download request
request_body={
    "fileTypes": ["LINE_ITEM"],
    "filterType": "CAMPAIGN_ID",
    "filterIds" : ['1914007'],
    "version": SDF_VERSION
}

request = dbm_service.sdf().download(body=request_body)
response = request.execute()

# Load SDF response to Pandas DataFrame
sdf_df = pd.read_csv(io.StringIO(response['lineItems']))

for li in poor_performers:
  geo = sdf_df.loc[sdf_df['Line Item Id'] == li, "Geography Targeting - Include"].iloc[0]
  sdf_df.loc[sdf_df['Line Item Id'] == li, "Bid Multipliers"] = f"(geo; {geo} 0.5;);"

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update4_bidmultipliers.csv', index=False)

# Display updated DataFrame
sdf_df.head()

In [ ]:
files.download('sdf_update4_bidmultipliers.csv')

print('Success, your new SDF file has been downloaded')

Note: the only rows included in the output are those that we want to modify.

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.6 Challenge

Challenge: update your campaign with both language and audience targeting.

  • All Lineitems should target the following Google audiences

    • Affinity Categories » Technology » Mobile Enthusiasts
    • Affinity Categories » Technology » Technophiles » High-End Computer Aficionado
    • In-Market Categories » Consumer Electronics
  • LineItems for France, should be targeted at French speakers

  • LineItems for Great Britain, should be targeted at English speakers

Tips

  • Google Audience IDs can be found in the DV360 UI or by downloading an SDF with an existing audience applied
  • Language IDs can be found in the Language.json ERF file or by downloading an SDF with the language already applied

In [ ]:
#TODO

Solution


In [ ]:
# Format today-2 in required date format
yesterday = (datetime.date.today() - datetime.timedelta(2)).strftime('%Y%m%d')

# Download ERF for Language.json from public GCS bucket
request = gcs_service.objects().get_media(
    bucket="gdbm-public",
    object='entity/' + yesterday + '.0.Language.json')

response = request.execute()
languages = json.loads(response)

language_targets = ['en', 'fr']
lang_targeting_ids = {}

# Search language.json for language targets 'en' and 'fr'
for lang in languages:
  if lang['code'] in language_targets:
    lang_targeting_ids[lang['code']] = lang['id']
    print(lang)
    
print(lang_targeting_ids)

# Define targeting template
targeting_template = {
 'Affinity & In Market Targeting - Include': '4569529;4586809;4497529;',
 }

# Configure the sdf.download request
request_body={
    "fileTypes": ["LINE_ITEM"],
    "filterType": "CAMPAIGN_ID",
    "filterIds" : [CAMPAIGN_ID],
    "version": SDF_VERSION
}

request = dbm_service.sdf().download(body=request_body)
response = request.execute()

# Load SDF response to Pandas DataFrame
sdf_df = pd.read_csv(io.StringIO(response['lineItems']))

# Update DataFrame with Language and Audience targeting
sdf_df.loc[sdf_df.Name.str.contains("GB"), "Language Targeting - Include"] = f"{lang_targeting_ids['en']};"
sdf_df.loc[sdf_df.Name.str.contains("FR"), "Language Targeting - Include"] = f"{lang_targeting_ids['fr']};"
sdf_df['Affinity & In Market Targeting - Include'] = targeting_template['Affinity & In Market Targeting - Include']

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update5_challenge.csv', index=False)

# Display updated DataFrame
sdf_df.head()

In [ ]:
# Download file to disk using Colab syntax
files.download('sdf_update5_challenge.csv')

print("Success, check your downloads folder for a file called 'sdf_update5_challenge.csv'")

Upload the output .csv file in the DV360 UI

1B) SDF using DV360 API

What are Structured Data Files (SDF)? SDF are a way of using spreadsheets to make bulk changes to DV360 entities, including Campaigns, Insertion Orders, Line Items, TrueView Ad Groups, TrueView Ads and deals. SDF are the first step on the path to full automation in DV360, but only allow you to automate so far, as we'll explore now...


Note: as part of the Display & Video 360 API effort, the SDF resource (sdf.download) is migrating to a new endpoint (displayvideo.googleapis.com). The current endpoint (https://www.googleapis.com/doubleclickbidmanager/) will soon be sunset (anticipated Sept 2020). If you wish to use the legacy SDF resource (DBM API) you should follow track 1A, otherwise follow track 1B for examples using the DV360 API.

1.1 Manually create SDF

  1. Create a copy of the Google Store product feed
  2. Update the highlighted cells (B2:B3) on the tab called "sdf_insertionorders"
  3. Save the updated "sdf_insertionorders" tab and "sdf_lineitems" tab to .CSV (File >> Download >> CSV)
  4. Upload the two .CSV files together in the DV360 UI

This will create a very basic campaign, with 2 insertion orders, and 10 lineitems per insertion order.

1.2 Editing SDF programmatically

Our new LineItems are missing some important targeting and inventory controls:

  • Channels (e.g. groups of publisher URLs)
  • Inventory source
  • Brand safety
  • Geo targeting

Let’s use software to make these changes for us...

Create a function to download SDFs

As we'll be downloading multiple SDF files in the next exercises, we've created a function to handle the download process for us.


In [ ]:
def download_sdf(request_body):
  """Download sdf file, extract .csv files from .zip, load 
  'SDF-LineItems.csv' to Pandas DataFrame object. """

  # Create the sdfdownloadtask
  operation = display_video_service.sdfdownloadtasks().create(
      body=request_body).execute()
      
  print("Operation %s was created." % operation["name"])

  # Configure the operations.get request
  getRequest = display_video_service.sdfdownloadtasks().operations().get(
      name=operation["name"]
      )

  # Runs the given operations.get request, retrying with an exponential
  # backoff. Returns completed operation. Will raise an exception if the
  # operation takes more than five hours to complete.
  @retry.Retry(predicate=retry.if_exception_type(Exception), 
               initial=5, maximum=60, deadline=18000)
  def checkSdfdownloadtaskCompletion(getRequest):
    operation = getRequest.execute()
    if "done" not in operation:
      raise Exception("The operation has not completed.")
    return operation

  # Get current status of operation with exponential backoff retry logic
  operation = checkSdfdownloadtaskCompletion(getRequest)

  # Check if the operation finished with an error and return
  if "error" in operation:
    raise Exception("The operation finished in error with code %s: %s" % (
                    operation["error"]["code"],
                    operation["error"]["message"]))

  print("The operation completed successfully. Resource %s was created."
        % operation["response"]["resourceName"])

  # Extract download file resource name to use in download request
  resource_name = operation["response"]["resourceName"]

  # Configure the Media.download request
  downloadRequest = display_video_service.media().download_media(
    resourceName=resource_name)

  output_file = f"{resource_name.replace('/','-')}.zip"

  # Create output stream for downloaded file
  outStream = io.FileIO(output_file, mode='wb')

  # Make downloader object
  downloader = http.MediaIoBaseDownload(outStream, downloadRequest)

  # Download media file in chunks until finished
  download_finished = False
  while download_finished is False:
    _, download_finished = downloader.next_chunk()

  print(f"File downloaded to {output_file}")

  # Load output into a Pandas dataframe
  df = pd.read_csv(output_file, compression='zip')
  return df

print("Download SDF function created")

Define a boilerplate targeting template that all Line Items should adhere to


In [ ]:
targeting_template = {
  'Channel Targeting - Include': '2580510;',
  'Channel Targeting - Exclude': '2580509;',
  'Inventory Source Targeting - Include': '1;',
  'Inventory Source Targeting - Exclude': '6; 8; 9; 10; 2; 11; 12; 13; 16; 20; '
    +'23; 27; 29; 30; 31; 34; 35; 36; 38; 43; 46; 50; 51; 56; 60; 63; 67; 74;',
  'Digital Content Labels - Exclude': 'G; PG; T;',
  'Brand Safety Sensitivity Setting': 'Use custom',
  'Brand Safety Custom Settings': "Adult; Alcohol; Derogatory; "
      + "Downloads & Sharing; Drugs; Gambling; Politics; Profanity; Religion; "
      + "Sensitive social issues; Suggestive; Tobacco; Tragedy; "
      + "Transportation Accidents; Violence; Weapons;"
  }

Modify latest SDF LineItems file and update the columns according to the targeting template


In [ ]:
# Configure the sdfdownloadtasks.create request
sdf_body = {
    "version": SDF_VERSION_DV360,
    "advertiserId": ADVERTISER_ID,
    "parentEntityFilter": {
        "fileType": ["FILE_TYPE_LINE_ITEM"],
        "filterType": "FILTER_TYPE_CAMPAIGN_ID",
        "filterIds": [CAMPAIGN_ID]
    }
}

# Fetch updated SDF lineitem
sdf_df = download_sdf(sdf_body)

# Overwrite targeting columns using 'targeting_template'
sdf_df['Channel Targeting - Include'] = targeting_template['Channel Targeting - Include']
sdf_df['Channel Targeting - Exclude'] = targeting_template['Channel Targeting - Exclude']
sdf_df['Inventory Source Targeting - Include'] = targeting_template['Inventory Source Targeting - Include']
sdf_df['Inventory Source Targeting - Exclude'] = targeting_template['Inventory Source Targeting - Exclude']
sdf_df['Digital Content Labels - Exclude'] = targeting_template['Digital Content Labels - Exclude']
sdf_df['Brand Safety Sensitivity Setting'] = targeting_template['Brand Safety Sensitivity Setting']
sdf_df['Brand Safety Custom Settings'] = targeting_template['Brand Safety Custom Settings']

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update1_controls.csv', index=False)

# Show sample (5 rows) of DataFrame
sdf_df.head()

In [ ]:
# Download modified csv to local storage
files.download('sdf_update1_controls.csv')

print("Success, check your downloads folder for a file called 'sdf_update1_controls.csv'")

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.3 SDF + Entity Read Files

What are Entity Read Files (ERFs)? ERFs are flat files (.JSON) in Google Cloud Storage that contain lookup values for DV360 entities like geographies, creatives, etc. Each DV360 entity (Advertiser, Campaign, LineItem, etc) has a corresponding .JSON file in Cloud Storage retained free-of-charge for 60 days from their processing date.

ERFs consist of 1 file per entity type, written once per day to two separate Cloud buckets:

  1. Public (10 .JSON files) - contain common public data such as GeoLocation and Language which are stored in the gdbm-public bucket (the same bucket for every DV360 user).
  2. Private (13 .JSON files) - contain information about the DV360 Partner's campaigns, creatives, budgets and other private data and are stored in Partner-specific buckets (restricted to specific users)

Reference: https://developers.google.com/bid-manager/guides/entity-read/overview

ERFs can be used to speed up, and automate, the creation of SDF files.

Let's explore this now...

Download yesterday's GeoLocation.json from public ERF bucket using Google Cloud Storage API


In [ ]:
yesterday = datetime.date.today() - datetime.timedelta(2) # Actually today-2 to avoid issues with collection

# Download public ERF for geolocation info
request = gcs_service.objects().get_media(
    bucket="gdbm-public",
    object='entity/' + yesterday.strftime('%Y%m%d') + '.0.GeoLocation.json')

response = request.execute()
geolocations = json.loads(response)

print("GeoLocation.json successfully downloaded \n")
print("Here's a sample of the first 5 entries:\n")
pprint.pprint(geolocations[0:5])

Retrieve a list of country codes / IDs from GeoLocation.json for each of our store locations


In [ ]:
# Provide a list of store locations
store_locations = ['United Kingdom', 'France', 'Spain', 'Germany', 'Portugal']

# Create a new dictionary to save the country code and ID later on
geo_targeting_ids = {}

# Note: GeoLocation.json is over 800,000 lines
for location in geolocations:
  if location['canonical_name'] in store_locations:
    geo_targeting_ids[location['country_code']] = location['id']
    print(location)
    
print(geo_targeting_ids)

Download the latest SDF LineItems (because we've made changes since our last download)


In [ ]:
# Configure the sdfdownloadtasks.create request
sdf_body = {
    "version": SDF_VERSION_DV360,
    "advertiserId": ADVERTISER_ID,
    "parentEntityFilter": {
        "fileType": ["FILE_TYPE_LINE_ITEM"],
        "filterType": "FILTER_TYPE_CAMPAIGN_ID",
        "filterIds": [CAMPAIGN_ID]
    }
}

sdf_df = download_sdf(sdf_body)
sdf_df.head()

Modify the contents of the latest SDF output, then save a new CSV with updated Geo Targeting IDs


In [ ]:
for country in geo_targeting_ids:
  target_country = geo_targeting_ids[country]
  sdf_df.loc[sdf_df.Name.str.contains(country), 'Geography Targeting - Include'] = f'{target_country};'

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update2_geo.csv', index=False)

# Display updated DataFrame
sdf_df.head()

In [ ]:
# Download modified csv to local storage
files.download('sdf_update2_geo.csv')

print("Success, look for the file called 'sdf_update2_geo.csv' in your downloads folder")

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.4 SDF + Cloud Vision API

Next, let's look at how you can utilise external APIs.

  1. Download the 'product_feed' tab from Google Store as CSV (File >> Download >> CSV)
  2. Execute the following code block and upload 'product_feed.csv'
  3. This will create a new Python dictionary (key:value pairing), mapping SKUs with their image link

Warning: the Cloud Vision API is a paid product; utilising the following example in your own Cloud project will incur costs.

Try out the Cloud Vision API for free at cloud.google.com/vision


In [ ]:
# Upload product feed using Colab's upload utility
product_feed_csv = files.upload()

contents = next(iter(product_feed_csv.values())).decode('utf-8')
products = csv.DictReader(io.StringIO(contents))

image_url_list = {}

# Iterate through each row and update dict() with sku:link
for row in products:
  image_url_list[row['sku']] = row['image_link']

pprint.pprint(image_url_list)

Define a function to send images to the Cloud Vision API


In [ ]:
def vision_analysis(image_url):
  """Process images using the Cloud Vision API"""
  
  # Assign image URL
  image = vision.types.Image()
  image.source.image_uri = image_url

  # Instantiates a Vision client
  client = vision.ImageAnnotatorClient(credentials=credentials)

  # Performs label detection on the image file
  response = client.label_detection(image=image)
  labels = response.label_annotations
  
  targeting_keywords = []

  for label in labels:
    targeting_keywords.append(label.description)
  
  return targeting_keywords

Run our images through the function, and return a lookup table


In [ ]:
imageslookup = {}

for sku, url in image_url_list.items():
    imageslookup[sku] = vision_analysis(url)
    print(f"Vision analysis completed for: {url}")

print("\nCreating lookup table:")
pprint.pprint(imageslookup, indent=4)

Now that we have our new labels from the Vision API, we need to write these into the keyword targeting field


In [ ]:
for product in imageslookup:
  sdf_df.loc[sdf_df.Name.str.contains(product), \
      'Keyword Targeting - Include'] = ";".join(imageslookup[product]).lower()

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update3_keywords.csv', index=False)

sdf_df.head()

In [ ]:
# Download modified csv to local storage
files.download('sdf_update3_keywords.csv')

print("Success, look for the file called 'sdf_update3_keywords.csv' in your downloads folder")

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.5 Optimisation using Reports

Next, we'll look at how you could combine reporting data with operations such as optimising bid multipliers or deactivating activity. Note: your new campaign has no performance history, so we'll use an existing campaign for this exercise.


In [ ]:
# Define DV360 report definition (i.e. metrics and filters)
report_definition = {
  'params': {
    'type': 'TYPE_GENERAL',
    'metrics': [
      'METRIC_IMPRESSIONS',
      'METRIC_CLICKS',
      'METRIC_CTR',
      'METRIC_REVENUE_ADVERTISER'
    ],
    'groupBys': [
      'FILTER_ADVERTISER',
      'FILTER_INSERTION_ORDER',
      'FILTER_LINE_ITEM',
      'FILTER_ADVERTISER_CURRENCY'
    ],
    'filters': [
      {
        'type': 'FILTER_ADVERTISER', 
        'value': ADVERTISER_ID
      }
    ],
  },
  "metadata": {
    "title": "DV360 Automation API-generated report",
    "dataRange": 'LAST_90_DAYS',
    "format": "csv"
  },
  "schedule": {
    'frequency': "ONE_TIME"
  }
}

# Create new query using report definition
operation = dbm_service.queries().createquery(body=report_definition).execute()
pprint.pprint(operation)

# Runs the given Queries.getquery request, retrying with an exponential
# backoff. Returns completed operation. Will raise an exception if the
# operation takes more than five hours to complete.
@retry.Retry(predicate=retry.if_exception_type(Exception), 
             initial=5, maximum=60, deadline=18000)
def check_get_query_completion(getquery_request):
  response = getquery_request.execute()
  pprint.pprint(response)
  if response['metadata']['running']:
    raise Exception("The operation has not completed.")
  return response

getquery_request = dbm_service.queries().getquery(queryId=operation['queryId'])
response = check_get_query_completion(getquery_request)

In [ ]:
report_url = response['metadata']['googleCloudStoragePathForLatestReport']

# Use skipfooter to remove report footer from data
report_df = pd.read_csv(report_url, skipfooter=16, engine='python')
report_df.head(10)

In [ ]:
# Define our 'KPIs'
ctr_target = 0.15
imp_threshold = 10000

# Convert IDs to integers to remove the decimal point
report_df['Line Item ID'] = report_df['Line Item ID'].apply(int)
poor_performers = report_df.query('Impressions > @imp_threshold & (Clicks / Impressions)*100 < @ctr_target')

# Convert results to Python list
poor_performers = list(poor_performers['Line Item ID'])

print(f"There are {len(poor_performers)} LineItems with a CTR"
      f" < {ctr_target}% and over {imp_threshold} impressions:"
      f"\n{poor_performers}")

Download an updated SDF LineItems file, and if the LineItem ID is in the poor performers list, add a Geo bid multiplier to halve the bids (0.5)


In [ ]:
# Configure the sdfdownloadtasks.create request
sdf_body = {
    "version": SDF_VERSION_DV360,
    "advertiserId": ADVERTISER_ID,
    "parentEntityFilter": {
        "fileType": ["FILE_TYPE_LINE_ITEM"],
        "filterType": "FILTER_TYPE_CAMPAIGN_ID",
        "filterIds": ["1914007"]
    }
}

sdf_df = download_sdf(sdf_body)

for li in poor_performers:
  geo = sdf_df.loc[sdf_df['Line Item Id'] == li, "Geography Targeting - Include"].iloc[0]
  sdf_df.loc[sdf_df['Line Item Id'] == li, "Bid Multipliers"] = f"(geo; {geo} 0.5;);"

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update4_bidmultipliers.csv', index=False)

# Display updated DataFrame
sdf_df.head()

In [ ]:
files.download('sdf_update4_bidmultipliers.csv')

print('Success, your new SDF file has been downloaded')

Upload the output .csv file in the DV360 UI

Once the changes have been applied successfully, check the 'Targeting' controls within 'Line Item details'

1.6 Challenge

Challenge: update your campaign with both language and audience targeting.

  • All Lineitems should target the following Google audiences

    • Affinity Categories » Technology » Mobile Enthusiasts
    • Affinity Categories » Technology » Technophiles » High-End Computer Aficionado
    • In-Market Categories » Consumer Electronics
  • LineItems for France, should be targeted at French speakers

  • LineItems for Great Britain, should be targeted at English speakers

Tips

  • Google Audience IDs can be found in the DV360 UI or by downloading an SDF with an existing audience applied
  • Language IDs can be found in the Language.json ERF file or by downloading an SDF with the language already applied

In [ ]:
#TODO

Solution


In [ ]:
# Format today-2 in required date format
yesterday = (datetime.date.today() - datetime.timedelta(2)).strftime('%Y%m%d')

# Download ERF for Language.json from public GCS bucket
request = gcs_service.objects().get_media(
    bucket="gdbm-public",
    object='entity/' + yesterday + '.0.Language.json')

response = request.execute()
languages = json.loads(response)

language_targets = ['en', 'fr']
lang_targeting_ids = {}

# Search language.json for language targets 'en' and 'fr'
for lang in languages:
  if lang['code'] in language_targets:
    lang_targeting_ids[lang['code']] = lang['id']
    print(lang)
    
print(lang_targeting_ids)

# Define targeting template
targeting_template = {
 'Affinity & In Market Targeting - Include': '4569529;4586809;4497529;',
 }

# Configure the sdfdownloadtasks.create request
sdf_body = {
    "version": SDF_VERSION_DV360,
    "advertiserId": ADVERTISER_ID,
    "parentEntityFilter": {
        "fileType": ["FILE_TYPE_LINE_ITEM"],
        "filterType": "FILTER_TYPE_CAMPAIGN_ID",
        "filterIds": [CAMPAIGN_ID]
    }
}

sdf_df = download_sdf(sdf_body)

# Update DataFrame with Language and Audience targeting
sdf_df.loc[sdf_df.Name.str.contains("GB"), "Language Targeting - Include"] = f"{lang_targeting_ids['en']};"
sdf_df.loc[sdf_df.Name.str.contains("FR"), "Language Targeting - Include"] = f"{lang_targeting_ids['fr']};"
sdf_df['Affinity & In Market Targeting - Include'] = targeting_template['Affinity & In Market Targeting - Include']

# Save modified dataframe to remote storage in Colab
sdf_df.to_csv('sdf_update5_challenge.csv', index=False)

# Display updated DataFrame
sdf_df.head()

In [ ]:
# Download file to disk using Colab syntax
files.download('sdf_update5_challenge.csv')

print("Success, check your downloads folder for a file called 'sdf_update5_challenge.csv'")

Upload the output .csv file in the DV360 UI

2) Display & Video 360 API

What is the Display & Video 360 API? The Display & Video 360 API (formerly known as the DV360 Write API) is the programmatic interface for the Display & Video 360 platform. It allows developers to easily and efficiently automate complex Display & Video 360 workflows, such as creating insertion orders and setting targeting options for individual line items.

We'll use it now to build upon the campaign we created earlier using SDF.
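
Before making any writes, a minimal read request is a useful smoke test; the sketch below simply fetches our advertiser using advertisers().get():


In [ ]:
# Minimal read request: fetch the advertiser to confirm API access
advertiser = display_video_service.advertisers().get(
    advertiserId=ADVERTISER_ID).execute()

print(f"Advertiser '{advertiser['displayName']}' ({advertiser['advertiserId']}) retrieved")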

2.1 Campaign builds

Check Advertiser (ADVERTISER_ID) has active Lineitems


In [ ]:
request = display_video_service.advertisers().lineItems().list(
    advertiserId=ADVERTISER_ID,
    filter='entityStatus="ENTITY_STATUS_ACTIVE"',
    pageSize=1
    )

response = request.execute()

# Check if response is empty.
if not response:
  print("Advertiser has no active Line Items")
else:
  pprint.pprint(response['lineItems'])

In [ ]:
def get_active_lineitems(advertiser_id, campaign_id):
  """Returns a dict of active LineItems, mapping displayName to lineItemId."""
  list_lineitems = display_video_service.advertisers().lineItems().list(
        advertiserId=advertiser_id,
        filter=f'entityStatus="ENTITY_STATUS_ACTIVE" AND campaignId="{campaign_id}"',
        fields='lineItems(lineItemId,displayName)' # Return only two fields
        ).execute()

  active_lineitems = {li['displayName']: li['lineItemId']
                      for li in list_lineitems['lineItems']}
  return active_lineitems

Upload the extended feed for Google Store's new territories: Spain, Germany and Portugal.


In [ ]:
# Upload product feed using Colab's upload utility
product_feed_csv = files.upload()

contents = next(iter(product_feed_csv.values())).decode('utf-8')
products = list(csv.DictReader(io.StringIO(contents)))

# Create unique list of country codes -- set() automatically de-dupes
unique_country_codes = set([row['country code'] for row in products])

print(unique_country_codes)

Create Insertion Order template

Here we're defining a new function called 'create_insertion_order'. Note: all new Insertion Orders and Line Items created using the DV360 API are created in 'Draft' mode (as a safety mechanism), and must be activated with a second API call, or via the UI (e.g. manually by a trader).


In [ ]:
def create_insertion_order(parent_campaign_id, new_io_name):
  """Creates a new DV360 insertion order object."""

  # Define our new Insertion Order boilerplate
  newInsertionOrder = {
    "campaignId": parent_campaign_id,
    "displayName": new_io_name, # Define naming convention
    "entityStatus": 'ENTITY_STATUS_DRAFT',
    "pacing": {
        "pacingPeriod": "PACING_PERIOD_DAILY", 
        "pacingType": "PACING_TYPE_EVEN",
        "dailyMaxMicros": '1000000' # Equiv to $1 or local currency
    },
    "frequencyCap": {
        "unlimited": False,
        "timeUnit": 'TIME_UNIT_MONTHS',
        "timeUnitCount": 1,
        "maxImpressions": 5
    },
    "performanceGoal": {
      "performanceGoalType": 'PERFORMANCE_GOAL_TYPE_CPC',
      "performanceGoalAmountMicros": '1000000', # $1 CPM/CPC target
    },
    "bidStrategy": {
        "fixedBid": {
           "bidAmountMicros": "0" 
        },
    },
    "budget": {
      "automationType": "INSERTION_ORDER_AUTOMATION_TYPE_NONE",
      "budgetUnit": "BUDGET_UNIT_CURRENCY",
      "budgetSegments": [
        {
            "budgetAmountMicros": '30000000', # Equiv to $30 or local currency
            "description": 'My first segment',
            "dateRange": {
                "startDate": {
                    "year": year,
                    "month": month,
                    "day": day
                },
                "endDate":  {
                    "year": year_plus30,
                    "month": month_plus30,
                    "day": day_plus30
                }
            }
        }
    ]
    }
  }

  # API create() request to generate new Insertion Order
  newinsertionorder_request = display_video_service.advertisers().insertionOrders().create(
    advertiserId=ADVERTISER_ID,
    body=newInsertionOrder
    ).execute()
  
  # Define patch to activate new Insertion Order after creation
  patch = {
    "entityStatus": "ENTITY_STATUS_ACTIVE",
    }

  # API patch() request
  patchinsertionorder = display_video_service.advertisers().insertionOrders().patch(
    advertiserId=ADVERTISER_ID,
    insertionOrderId=newinsertionorder_request['insertionOrderId'],
    updateMask="entityStatus",
    body=patch
    ).execute()

  print(newinsertionorder_request)
  return (newinsertionorder_request)


print("Insertion Order function created")

Create LineItem template

Here we define a new function called 'create_lineitem', based on the template below.

Note: the following template does not include any targeting controls by default. Normally, we strongly encourage the addition of targeting before activating a line item.


In [ ]:
def create_lineitem(parent_io_id, new_li_name):
  """Creates a new DV360 lineitem object."""

  # Define our new LineItem boilerplate
  newLineItem = {
  "advertiserId": ADVERTISER_ID,
  "insertionOrderId": parent_io_id,
  "displayName": new_li_name, # Define naming convention
  "lineItemType": 'LINE_ITEM_TYPE_DISPLAY_DEFAULT',
  "entityStatus": 'ENTITY_STATUS_DRAFT',
  "flight": {
    "flightDateType": "LINE_ITEM_FLIGHT_DATE_TYPE_INHERITED",
  },
  "budget": {
    "budgetAllocationType": "LINE_ITEM_BUDGET_ALLOCATION_TYPE_FIXED",
    "maxAmount": "5000000" # $5 budget
  },
  "pacing": {
      "pacingPeriod": "PACING_PERIOD_DAILY", 
      "pacingType": "PACING_TYPE_EVEN",
      "dailyMaxMicros": '1000000' # $1 per day
  },
  "frequencyCap": {
    "unlimited": True
  },
  "creativeIds": [
    # TO DO
  ],
  "partnerRevenueModel": {
    "markupType": 'PARTNER_REVENUE_MODEL_MARKUP_TYPE_MEDIA_COST_MARKUP',
    "markupAmount": '0'
  },
  "bidStrategy": {
      "fixedBid": {
          "bidAmountMicros": '1000000'
      },
    }
  }

  # API create() request to generate new Lineitem
  newlineitem_request = display_video_service.advertisers().lineItems().create(
    advertiserId=ADVERTISER_ID, 
    body=newLineItem
    ).execute()

  # Define patch to activate new Line Item after creation
  patch = {
    "entityStatus": "ENTITY_STATUS_ACTIVE",
    }

  # API patch() request
  patchlineitem = display_video_service.advertisers().lineItems().patch(
    advertiserId=ADVERTISER_ID,
    lineItemId=newlineitem_request['lineItemId'],
    updateMask="entityStatus",
    body=patch
    ).execute()
  
  print(newlineitem_request)
  return(newlineitem_request)

print("LineItem function created")

Build our new campaign

First, we'll loop through the list of countries generated at the beginning, and for each country, create a new Insertion Order by calling our function 'create_insertion_order'. Within that loop, we find every product that is sold in the corresponding country-code, and create a new Line Item for every matching product using our function 'create_lineitem'.

Sit tight, this one can take a while (~10 mins)...

Link to DV360 UI


In [ ]:
%%time
for country_code in unique_country_codes:
  # Create() and patch() new Insertion Order
  io_name = f"Google Store | {country_code} | Display | Prospecting"
  insertionorder = create_insertion_order(CAMPAIGN_ID, io_name)
  for row in products:
    if country_code in row['country code']:
      # Create() and patch() new LineItem
      li_name = f"{row['country code']} | {row['title']} | {row['sku']}"
      lineitem = create_lineitem(insertionorder['insertionOrderId'], li_name)

print("Process completed")

If successful, the result should look similar to the below in DV360:

2.2 Individual targeting

Retrieve a list of available targeting options using targetingTypes().targetingOptions()

The following example demonstrates retrieving Browser targeting options only.

The "BrowserDetails" field is only applicable with "TARGETING_TYPE_BROWSER".


In [ ]:
# Create the page token variable and results dictionary.
next_page_token = ""
options_dict = {}

while True:
  # Request the targeting options list.
  response = display_video_service.targetingTypes().targetingOptions().list(
    advertiserId=ADVERTISER_ID,
    targetingType="TARGETING_TYPE_BROWSER",
    pageToken=next_page_token
    ).execute()

  # Check if response is empty.
  if not response:
    print("List request returned no Targeting Options")
    break

  # Iterate over retrieved targeting options.
  for option in response['targetingOptions']:
    options_dict[option['targetingOptionId']] = option['browserDetails']['displayName']

  # Break out of loop if there is no next page.
  if 'nextPageToken' not in response:
    break

  # Update the next page token.
  next_page_token = response['nextPageToken']

pprint.pprint(options_dict)

Apply individual targeting criteria to a single entity


In [ ]:
# Return list of Lineitems with active status
active_lineitems = get_active_lineitems(ADVERTISER_ID, CAMPAIGN_ID)

# Fetch first Lineitem ID
lineitem_id = list(active_lineitems.values())[0]

# Create an assigned targeting option object.
assigned_targeting_option_obj = {
  'browserDetails': {
      'targetingOptionId': '500072'
  }
}

# Create the assigned targeting option.
assigned_targeting_option = display_video_service.advertisers().lineItems()\
.targetingTypes().assignedTargetingOptions().create(
  advertiserId=ADVERTISER_ID,
  lineItemId=f"{lineitem_id}",
  targetingType="TARGETING_TYPE_BROWSER",
  body=assigned_targeting_option_obj
).execute()

# Display the new assigned targeting option.
print(f"Assigned Targeting Option {assigned_targeting_option['name']} was created.")

Applying individual targeting criteria to multiple entities


In [ ]:
# Create the page token variable and results dictionary.
next_page_token = ""
options_dict = {}

while True:
  # Request the Google audiences list.
  response = display_video_service.googleAudiences().list(
    advertiserId=ADVERTISER_ID,
    filter='displayName : "Technology"',
    pageToken=next_page_token
    ).execute()

  # Check if response is empty.
  if not response:
    print("List request returned no Targeting Options")
    break

  # Iterate over retrieved targeting options.
  for option in response['googleAudiences']:
    options_dict[option['googleAudienceId']] = [option['displayName'], option['googleAudienceType']]

  # Break out of loop if there is no next page.
  if 'nextPageToken' not in response:
    break

  # Update the next page token.
  next_page_token = response['nextPageToken']

pprint.pprint(options_dict)

In [ ]:
google_audience_id = "92948"

# Return list of Lineitems with active status
active_lineitems = get_active_lineitems(ADVERTISER_ID, CAMPAIGN_ID)

# Create an assigned targeting option object.
assigned_targeting_option_obj = {
    "audienceGroupDetails": {
      "includedGoogleAudienceGroup": {
        "settings": [{"googleAudienceId": f"{google_audience_id}"}]
      }
    }
}

pprint.pprint(assigned_targeting_option_obj)

# Update bulk targeting
for li in active_lineitems.values():
  # Create the assigned targeting option.
  assigned_targeting_option = display_video_service.advertisers().lineItems().\
  targetingTypes().assignedTargetingOptions().create(
    advertiserId=ADVERTISER_ID,
    lineItemId=f'{li}',
    targetingType='TARGETING_TYPE_AUDIENCE_GROUP',
    body=assigned_targeting_option_obj
  ).execute()
  # Display the new assigned targeting option.
  print(f"Assigned Targeting Option {assigned_targeting_option['name']} was created.")

2.3 Bulk targeting

Bulk updates using templated targeting controls


In [ ]:
def set_default_li_targeting(lineitem_id):
  """Sets default LineItem targeting according to standard template."""

  # Define 'Channels'
  createChannelAssignedTargetingOptions = []
  for targeting_id in ['1777746835', '1778039430']:
    createChannelAssignedTargetingOptions.append(
        {'channelDetails': {'channelId': targeting_id, "negative": False }}
    )

  # Define 'Inventory'
  createInventoryAssignedTargetingOptions = []
  for targeting_id in ['1']:
    createInventoryAssignedTargetingOptions.append(
        {'inventorySourceDetails': {'inventorySourceId': targeting_id}}
    )

  # Define 'Sensitive categories'
  createSensitiveCatAssignedTargetingOptions = []
  SensitiveCategory = [
          '1163177997', 
          '1163178297', 
          '118521027123', 
          '118521027843',
          '118521028083',
          '118521028563',
          '118521028803',
          '1596254697'
    ]
  for targeting_id in SensitiveCategory:
    createSensitiveCatAssignedTargetingOptions.append(
        {'sensitiveCategoryExclusionDetails': {'excludedTargetingOptionId': targeting_id}}
    )

  # Define 'Digital content labels'
  createDigitalContentAssignedTargetingOptions = []
  ContentRatingTier = [
          '19875634320',
          '19875634200',
          '19875634080'
    ]
  for targeting_id in ContentRatingTier:
    createDigitalContentAssignedTargetingOptions.append(
        {'digitalContentLabelExclusionDetails': {'excludedTargetingOptionId': targeting_id}}
    )

  # Construct request
  bulk_edit_line_item_request = {
    'createRequests': [
        {
            'targetingType': 'TARGETING_TYPE_CHANNEL',
            'assignedTargetingOptions': createChannelAssignedTargetingOptions
        },
        {
            'targetingType': 'TARGETING_TYPE_INVENTORY_SOURCE',
            'assignedTargetingOptions': createInventoryAssignedTargetingOptions
        },
        {
            'targetingType': 'TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION',
            'assignedTargetingOptions': createSensitiveCatAssignedTargetingOptions
        },
        {
            'targetingType': 'TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION',
            'assignedTargetingOptions': createDigitalContentAssignedTargetingOptions
        },
    ]
  }

  # Edit the line item targeting.
  request = display_video_service.advertisers().lineItems().bulkEditLineItemAssignedTargetingOptions(
      advertiserId=ADVERTISER_ID,
      lineItemId=lineitem_id,
      body=bulk_edit_line_item_request
      )

  response = request.execute()

  # Check if response is empty.
  # If not, iterate over and display new assigned targeting options.
  if not response:
    print("Bulk edit request created no new AssignedTargetingOptions")
  else:
    for assigned_targeting_option in response["createdAssignedTargetingOptions"]:
      print("Assigned Targeting Option %s was created."
            % assigned_targeting_option["name"])

print("Lineitem targeting function created")

Retrieve a list of active LineItems and apply the bulk targeting template


In [ ]:
# Return list of Lineitems with active status
active_lineitems = get_active_lineitems(ADVERTISER_ID, CAMPAIGN_ID)

# Apply the default targeting template to each active line item
for li in active_lineitems.values():
  set_default_li_targeting(li)
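
As a sanity check, bulkListLineItemAssignedTargetingOptions returns every assigned targeting option on a line item in a single call; a minimal sketch for one of the line items updated above:


In [ ]:
# Bulk-list all assigned targeting options on a sample line item
sample_li = next(iter(active_lineitems.values()))
bulk_list = display_video_service.advertisers().lineItems().\
bulkListLineItemAssignedTargetingOptions(
  advertiserId=ADVERTISER_ID,
  lineItemId=f'{sample_li}'
).execute()
pprint.pprint(bulk_list.get('assignedTargetingOptions', []))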

2.4 Optimisation (external trigger)

The following optimisations will be completed on the campaign you created earlier.

Create a function to 'pause' or 'optimise' LineItems


In [ ]:
def optimise_lineitem(lineitem_id, action):
  """Optimises a LineItem according to the given action."""

  lineitem_object = display_video_service.advertisers().lineItems().get(
      advertiserId=ADVERTISER_ID,
      lineItemId=lineitem_id).execute()

  if lineitem_object['entityStatus'] == 'ENTITY_STATUS_ACTIVE':
    if action == "pause":
      patch = {
          "entityStatus": "ENTITY_STATUS_PAUSED",
      }

      lineitem_patched = display_video_service.advertisers().lineItems().patch(
        advertiserId=ADVERTISER_ID,
        lineItemId=lineitem_id,
        updateMask="entityStatus",
        body=patch).execute()
      print(f"LineItemID {lineitem_patched['name']} was paused")

    elif action == "optimise":
      # Reduce to a fixed bid of 0.50 (bid amounts are expressed in micros)
      patch = {
          'bidStrategy': {
              'fixedBid': {"bidAmountMicros": '500000'},
          }
      }

      lineitem_patched = display_video_service.advertisers().lineItems().patch(
        advertiserId=ADVERTISER_ID,
        lineItemId=lineitem_id,
        updateMask="bidStrategy",
        body=patch).execute()

      print(f"{lineitem_patched['name']} was optimised")
    else:
      print("Not a valid action, must be either 'pause' or 'optimise'")
  else:
    print(f"{lineitem_object['name']} already paused/archived - no action taken")

print("Optimisation function created")

Create a list of out-of-stock products


In [ ]:
out_of_stock_list = []
products = csv.DictReader(io.StringIO(contents))

# Iterate through each row, checking for products where availability = 0
for row in products:
  if row['availability'] == '0':
    out_of_stock_list.append(row['sku'])

# This should generate a list of 9 SKUs that are no longer in stock
print(f"We've found {len(out_of_stock_list)} out-of-stock products: {out_of_stock_list}")

Process optimisation


In [ ]:
# Return list of Lineitems with active status
active_lineitems = get_active_lineitems(ADVERTISER_ID, CAMPAIGN_ID)

# Iterate through the out-of-stock list. If a SKU appears in a line item's
# name, pause that line item.
for product in out_of_stock_list:
  for name, lineitem_id in active_lineitems.items():
    if product in name:
      optimise_lineitem(lineitem_id, 'pause')

2.5 Optimisation (reporting data)

As your new campaign has no performance data, the following optimisations will be completed on an existing campaign with historical data.

Create new performance report and fetch results


In [ ]:
# Define DV360 report definition (i.e. metrics and filters)
report_definition = {
  'params': {
    'type': 'TYPE_GENERAL',
    'metrics': [
      'METRIC_IMPRESSIONS',
      'METRIC_CLICKS',
      'METRIC_CTR',
      'METRIC_REVENUE_ADVERTISER'
    ],
    'groupBys': [
      'FILTER_ADVERTISER',
      'FILTER_INSERTION_ORDER',
      'FILTER_LINE_ITEM',
      'FILTER_ADVERTISER_CURRENCY'
    ],
    'filters': [
      {
        'type': 'FILTER_ADVERTISER', 
        'value': ADVERTISER_ID
      }
    ],
  },
  "metadata": {
    "title": "DV360 Automation API-generated report",
    "dataRange": 'LAST_90_DAYS',
    "format": "csv"
  },
  "schedule": {
    'frequency': "ONE_TIME"
  }
}

# Create new query using report definition
operation = dbm_service.queries().createquery(body=report_definition).execute()
pprint.pprint(operation)

# Runs the given Queries.getquery request, retrying with an exponential
# backoff. Returns completed operation. Will raise an exception if the
# operation takes more than five hours to complete.
@retry.Retry(predicate=retry.if_exception_type(Exception), initial=5, maximum=60, deadline=18000)
def check_get_query_completion(getquery_request):
  response = getquery_request.execute()
  pprint.pprint(response)
  if response['metadata']['running']:
    raise Exception("The operation has not completed.")
  return response

getquery_request = dbm_service.queries().getquery(queryId=operation['queryId'])
response = check_get_query_completion(getquery_request)

Load report to Pandas DataFrame


In [ ]:
# Capture report URL from response
report_url = response['metadata']['googleCloudStoragePathForLatestReport']

# Use skipfooter to drop the 16-line report footer appended to this CSV
report_df = pd.read_csv(report_url, skipfooter=16, engine='python')
report_df.head(10)
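
The skipfooter=16 value above is specific to this report definition; the footer length can vary. A more defensive sketch (assuming the standard DBM report layout, where a blank line separates the data rows from the footer) truncates the downloaded CSV at the first blank line:


In [ ]:
# Alternative load: keep only the rows above the first blank line,
# which precedes the report footer in DBM CSV output
report_text = requests.get(report_url).text
data_section = report_text.split('\n\n', 1)[0]
report_df_alt = pd.read_csv(io.StringIO(data_section))
report_df_alt.head(10)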

Create two lists of poorly performing LineItems:

  1. LineItems that should be paused
  2. LineItems whose bids should be reduced

In [ ]:
# Define our 'KPIs'
ctr_to_pause = 0.1
ctr_to_optimise = 0.3
imp_threshold = 5000

# Convert IDs to integers to remove the decimal point
report_df['Line Item ID'] = report_df['Line Item ID'].apply(int)
lineitems_to_pause = report_df.query(
    'Impressions > @imp_threshold and (Clicks / Impressions)*100 < @ctr_to_pause')
lineitems_to_reducebid = report_df.query(
    'Impressions > @imp_threshold and (Clicks / Impressions)*100 > @ctr_to_pause '
    'and (Clicks / Impressions)*100 < @ctr_to_optimise')

# Convert results to Python list
lineitems_to_pause = list(lineitems_to_pause['Line Item ID'])
lineitems_to_reducebid = list(lineitems_to_reducebid['Line Item ID'])

print(f"Found {len(lineitems_to_pause)} LineItems with a CTR"
      f" < {ctr_to_pause}% and > {imp_threshold} impressions:"
      f"\n{lineitems_to_pause}\n")

print(f"Found {len(lineitems_to_reducebid)} LineItems with a CTR"
      f" between {ctr_to_pause}%-{ctr_to_optimise}%, and > {imp_threshold} impressions:"
      f"\n{lineitems_to_reducebid}")

Process optimisation


In [ ]:
%%time
if lineitems_to_pause:
  for lineitem in lineitems_to_pause:
    optimise_lineitem(str(lineitem), 'pause')

if lineitems_to_reducebid:
  for lineitem in lineitems_to_reducebid:
    optimise_lineitem(str(lineitem), 'optimise')
  
print("Optimisation completed")

2.6 Creative upload

Uploading Display creatives from remote storage (http)

The following demonstrates how to upload image assets from remote storage, but it's also possible to upload from local storage.

Reference: https://developers.google.com/display-video/api/guides/creating-creatives/overview


In [ ]:
def upload_creative_image_asset(asset_url, click_url):
  """Creates a new DV360 creative object."""

  # Fetch asset from cloud storage using requests library
  asset = requests.get(asset_url)

  # Create upload object from http image url
  fh = io.BytesIO(asset.content)
  media_body = http.MediaIoBaseUpload(fh, mimetype='image/png',
                                      chunksize=1024*1024, resumable=True)

  # Extract filename from url path
  filename = str(asset_url.rsplit(sep="/", maxsplit=1)[1])

  # Create the request body
  body = {
    'filename': filename
  }

  # Upload the asset
  asset_request = display_video_service.advertisers().assets().upload(
    advertiserId=ADVERTISER_ID,
    body=body,
    media_body=media_body
  ).execute()

  # Display the new asset media ID
  print(f"Asset was created with media ID {asset_request['asset']['mediaId']}")

  display_name = f"{filename}".split(sep=".")[0].lower() + " 300x250"

  # Create a creative object.
  creative_obj = {
    "displayName": f"{display_name}",
    "entityStatus": "ENTITY_STATUS_ACTIVE",
    "creativeType": "CREATIVE_TYPE_STANDARD",
    "hostingSource": "HOSTING_SOURCE_HOSTED",
    "dimensions": {
        "widthPixels": 300,
        "heightPixels": 250
    },
    "assets": [
      {
        "asset": {"mediaId": asset_request['asset']['mediaId']},
        "role": "ASSET_ROLE_MAIN"
      }
    ],
    "exitEvents": [
      {
        "type": "EXIT_EVENT_TYPE_DEFAULT",
        "url": f"{click_url}",
      }
    ]
  }

  creative_request = display_video_service.advertisers().creatives().create(
      advertiserId=ADVERTISER_ID,
      body=creative_obj
      ).execute()

  # Display the new creative ID
  print(f"Creative was created with ID {creative_request['creativeId']}"
        f" and DisplayName '{creative_request['displayName']}'")

  pprint.pprint(creative_request)

print("Creative upload function defined")

Upload image creatives

Note: all of the following assets share the same dimensions (300x250) and type 'CREATIVE_TYPE_STANDARD'.

When uploading assets of multiple sizes, the creatives.create body must reflect this; the sketch after the next cell shows one way to derive the dimensions dynamically.


In [ ]:
image_assets = {
  'https://storage.googleapis.com/dv3workshop/googlestore/pixelbook.png': 
  'https://store.google.com/product/google_pixelbook',
  'https://storage.googleapis.com/dv3workshop/googlestore/googlehome.png': 
  'https://store.google.com/product/google_home_hub',
  'https://storage.googleapis.com/dv3workshop/googlestore/googlehomemini.png': 
  'https://store.google.com/product/google_home_mini',
  'https://storage.googleapis.com/dv3workshop/googlestore/pixel2.png': 
  'https://store.google.com/product/pixel_2',
  'https://storage.googleapis.com/dv3workshop/googlestore/chromecastultra.png': 
  'https://store.google.com/product/chromecast_ultra'
}

for asset, click_url in image_assets.items():
  upload_creative_image_asset(asset, click_url)
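
If your assets come in multiple sizes, the dimensions block in the creative body must match each asset. A hedged sketch that derives the dimensions from the image bytes with Pillow (available by default in Colab), rather than hard-coding 300x250:


In [ ]:
# Sketch: derive creative dimensions from the image itself using Pillow
from PIL import Image

asset_url = next(iter(image_assets))  # reuse one of the asset URLs above
asset_bytes = requests.get(asset_url).content
width, height = Image.open(io.BytesIO(asset_bytes)).size

# This dict could replace the hard-coded 'dimensions' in creative_obj
print({"widthPixels": width, "heightPixels": height})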

2.7 Challenge

Challenge: build a new campaign for 'Google Airways' using the flights feed provided here.

Tips

  • You don't need to rewrite any functions; reuse the existing ones
  • Don't forget to use print() statements to see progress within a for loop

Your final campaign should contain one Insertion Order per city, each holding a LineItem for every flight route in that city.


In [ ]:
#TODO

Solution


In [ ]:
%%time
# Load flight information from CSV file
googleairways_routes = files.upload()
contents = next(iter(googleairways_routes.values())).decode('utf-8')
routes = list(csv.DictReader(io.StringIO(contents)))

# Create a unique set (de-duped) of cities from the routes provided
unique_cities = set()
for row in routes:
  unique_cities.add(row['airport-city'])
print(unique_cities)

# Create Campaign and Patch()
new_campaign = create_campaign("Google Airways")
print(new_campaign)

# Step through each city within our unique set of cities
for city in unique_cities:
  # Create Insertion Order and Patch()
  io_name = f"Flights | {city}"
  create_io = create_insertion_order(new_campaign['campaignId'], io_name)
  # Step through each route (row) of the CSV upload
  for row in routes:
    if city == row['airport-city']:
      # Create LineItems and Patch()
      li_name = f"Flight {row['flightno']} | {row['depairport-city']} to {row['arrairport-city']}"
      create_lis = create_lineitem(create_io['insertionOrderId'], li_name)

print("Process completed")

Resources


Please help us improve this workshop by completing the satisfaction survey

Thank you!

Cleaning up

To clean up all of the DV360 resources used during these exercises, you can run the following script. Warning: this will pause all active Campaigns in the DV360 advertiser specified in ADVERTISER_ID (and the helper functions below can archive or delete them), unless they are explicitly listed in protected_campaigns.


In [ ]:
# @title { display-mode: "form" }
#@markdown Reset DV360 account

# Exclude following campaigns in the reset process
protected_campaigns = ['1914007','985747']

def reset_demo_account():
  """Reset DV360 account to earlier state."""

  print("Resetting DV360 account...")
  
  # Pause all active Campaigns (protected campaigns are skipped)
  list_campaigns = display_video_service.advertisers().campaigns().list(
    advertiserId=ADVERTISER_ID,
    filter='entityStatus="ENTITY_STATUS_ACTIVE"'
    ).execute()

  results = list_campaigns.get('campaigns', [])
  print(f"Found {len(results)} active campaigns")

  for index, campaign in enumerate(results, start=1):
    print(f"Campaign {index} of {len(results)}")
    pause_campaign(campaign['campaignId'])

  # Reactivate paused LineItems on the protected demo campaign
  list_lineitems = display_video_service.advertisers().lineItems().list(
    advertiserId=ADVERTISER_ID,
    filter='entityStatus="ENTITY_STATUS_PAUSED" AND campaignId="1914007"'
    ).execute()

  if not list_lineitems.get('lineItems'):
    print("No paused lineitems found")
  else:
    for index, li in enumerate(list_lineitems['lineItems'], start=1):
      print(f"Lineitem {index} of {len(list_lineitems['lineItems'])}")
      activate_lineItem(li['lineItemId'])
  
  print("Account reset completed")

def delete_campaign(campaign_id):
  """Permanently deletes a DV360 campaign object"""

  if campaign_id in protected_campaigns:
    print(f"Campaign ID {campaign_id} not deleted (protected campaign)")
  else:
    try:
      display_video_service.advertisers().campaigns().delete(
        advertiserId=ADVERTISER_ID,
        campaignId=campaign_id
        ).execute()
      print(f"{campaign_id} successfully deleted")
    except Exception as e:
      print(f"Could not delete campaign {campaign_id}: {e}")

def archive_campaign(campaign_id):
  """Updates DV360 campaign object status to archived"""

  patch = {
    'entityStatus':'ENTITY_STATUS_ARCHIVED'
  }

  if campaign_id in protected_campaigns:
    print(f"Campaign ID {campaign_id} not archived (protected campaign)")
  else:
    archived_campaign = display_video_service.advertisers().campaigns().patch(
        advertiserId=ADVERTISER_ID,
        campaignId=campaign_id,
        updateMask="entityStatus",
        body=patch
        ).execute()
    print(f"Campaign ID {campaign_id} successfully archived")

def pause_campaign(campaign_id):
  """Updates DV360 campaign object status to paused"""
  
  patch = {
    'entityStatus':'ENTITY_STATUS_PAUSED'
  }

  if campaign_id in protected_campaigns:
    print(f"Campaign ID {campaign_id} not paused (protected campaign)")
  else:
    paused_campaign = display_video_service.advertisers().campaigns().patch(
        advertiserId=ADVERTISER_ID,
        campaignId=campaign_id,
        updateMask="entityStatus",
        body=patch
        ).execute()
    print(f"Campaign ID {campaign_id} successfully paused")

def activate_lineItem(lineitem_id):
  """Updates DV360 lineitem object status to active"""
  
  patch = {
    'entityStatus':'ENTITY_STATUS_ACTIVE'
  }

  activated_lineitem = display_video_service.advertisers().lineItems().patch(
    lineItemId=lineitem_id,
    advertiserId=ADVERTISER_ID,
    updateMask="entityStatus",
    body=patch
    ).execute()
  print(f"Lineitem ID {lineitem_id} reactivated")

# Call main function to initialise reset procedure
reset_demo_account()
