Create NFS Exports from a big list

nfs.nfs_add_share() endpoint documentation

Input parameters
  • export_path - NFS export path. Must include the leading slash.
  • fs_path - filesystem path of the exported directory. Must include the leading slash /
  • description - Description of this NFS export. Use this to organize or "spec" your shares.
  • restrictions - A list (or array) of NFS restriction settings, where each setting is a dictionary of the following:
    • read_only - Sets the NFS export to read-only
    • host_restrictions - List of IP addresses, ranges, or network segments to restrict the share to
    • user_mapping - NFS user mapping (squash) support. Valid values include: 0 - Do not map users, 1 - Map root user (uid 0), 2 - Map all users.
    • map_to_user_id - The user ID to map (see /users)
  • allow_fs_path_create - Specifies whether the file system path can be created if it does not already exist. This will also recursively create the non-existent directory
Output

Returns json for the new share object that has been created which is simply the attributes specified in the Input arguments.

python example code

from qumulo.rest_client import RestClient
from qumulo.rest.nfs import NFSRestriction

rc = RestClient("product", 8000)
rc.login("admin", "secret")

out = rc.nfs.nfs_add_share(export_path = '/users/export1',
                    fs_path = '/users/export1',
                    description = 'User Export 1',
                    restrictions = [NFSRestriction({
                                        'read_only': False, 
                                        'host_restrictions': ['10.20.217.62'],
                                        'user_mapping': 'NFS_MAP_NONE', 
                                        'map_to_user_id': '0'})],
                    allow_fs_path_create = True)

print(out)

Create ~1,000 shares from a csv


In [26]:
# Set up rest client
import pandas
import sys
import traceback
import os

from qumulo.rest_client import RestClient
from qumulo.rest.nfs import NFSRestriction

rc = RestClient("<qumulo-cluster>", 8000)
rc.login("<qumulo-user>", "<qumulo-password>");

In [27]:
# a csv list of celbrity names and twitter handles
USER_LIST_URL = "https://gist.githubusercontent.com/mbejda/9c3353780270e7298763/raw/1bfc4810db4240d85947e6aef85fcae71f475493/Top-1000-Celebrity-Twitter-Accounts.csv"
user_list_df = pandas.read_csv(USER_LIST_URL, encoding = 'utf8')

In [28]:
# loop through the celbrity names and create nfs shares
for idx, row in user_list_df.iterrows():
    user_dir = '/users/%s/%s' % (row['twitter'][0].lower(), row['twitter'])
    try:
        share = rc.nfs.nfs_add_share(export_path = user_dir,
                        fs_path = user_dir,
                        description = row['name'],
                        restrictions = [NFSRestriction({
                                            'read_only': False, 
                                            'host_restrictions': [],
                                            'user_mapping': 'NFS_MAP_NONE', 
                                            'map_to_user_id': '0'})],
                        allow_fs_path_create = True)
        print(share)
        quota_res = rc.quota.create_quota(id_ = dir_id, limit_in_bytes = 20000000000)
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        print "%s" % (str(exc_value)[0:110], )
    break
print("Finished create user directories and NFS exports")


{u'restrictions': [{u'read_only': False, u'host_restrictions': [], u'user_mapping': u'NFS_MAP_NONE', u'map_to_user_id': u'0'}], u'description': u'KATY PERRY', u'id': u'8911', u'fs_path': u'/users/k/katyperry', u'export_path': u'/users/k/katyperry'}
name 'dir_id' is not defined
Finished create user directories and NFS exports

Go take a look at your creation: https://qumulo-cluster/nfs-exports


Delete all existing user shares and directories


In [25]:
# This better be a demo cluster
for share in rc.nfs.nfs_list_shares():
    if '/users' in share['fs_path']:
        rc.nfs.nfs_delete_share(id_=share['id'])
        rc.fs.delete_tree(path = share['fs_path'])

In [ ]:
# finish the cleanup job
rc.fs.delete_tree(path = '/users')

In [ ]: