
Auto-update dependencies. #1005


Merged
3 commits merged on Jun 28, 2017
bigtable/hello/requirements.txt (2 changes: 1 addition & 1 deletion)

@@ -1,2 +1,2 @@
-google-cloud-bigtable==0.24.0
+google-cloud-bigtable==0.25.0
 google-cloud-core==0.25.0
bigtable/metricscaler/metricscaler.py (38 changes: 18 additions & 20 deletions)

@@ -22,7 +22,6 @@
 from google.cloud import monitoring
 
 
-
 def get_cpu_load():
     """Returns the most recent Cloud Bigtable CPU load measurement.
@@ -51,23 +50,22 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
         bigtable_cluster (str): Cloud Bigtable cluster ID to scale
         scale_up (bool): If true, scale up, otherwise scale down
     """
-    _MIN_NODE_COUNT = 3
-    """
-    The minimum number of nodes to use. The default minimum is 3. If you have a
-    lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
-    clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
-    metric is useful in figuring out the minimum number of nodes.
-    """
-
-    _MAX_NODE_COUNT = 30
-    """
-    The maximum number of nodes to use. The default maximum is 30 nodes per zone.
-    If you need more quota, you can request more by following the instructions
-    <a href="https://cloud.google.com/bigtable/quota">here</a>.
-    """
+    # The minimum number of nodes to use. The default minimum is 3. If you have
+    # a lot of data, the rule of thumb is to not go below 2.5 TB per node for
+    # SSD clusters, and 8 TB for HDD. The
+    # "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring
+    # out the minimum number of nodes.
+    min_node_count = 3
+
+    # The maximum number of nodes to use. The default maximum is 30 nodes per
+    # zone. If you need more quota, you can request more by following the
+    # instructions at https://cloud.google.com/bigtable/quota.
+    max_node_count = 30
+
+    # The number of nodes to change the cluster by.
+    size_change_step = 3
 
-    _SIZE_CHANGE_STEP = 3
-    """The number of nodes to change the cluster by."""
     # [START bigtable_scale]
     bigtable_client = bigtable.Client(admin=True)
     instance = bigtable_client.instance(bigtable_instance)
@@ -79,16 +77,16 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
     current_node_count = cluster.serve_nodes
 
     if scale_up:
-        if current_node_count < _MAX_NODE_COUNT:
-            new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
+        if current_node_count < max_node_count:
+            new_node_count = min(current_node_count + 3, max_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
             print('Scaled up from {} to {} nodes.'.format(
                 current_node_count, new_node_count))
     else:
-        if current_node_count > _MIN_NODE_COUNT:
+        if current_node_count > min_node_count:
             new_node_count = max(
-                current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
+                current_node_count - size_change_step, min_node_count)
             cluster.serve_nodes = new_node_count
             cluster.update()
             print('Scaled down from {} to {} nodes.'.format(
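For readers skimming the diff, the resize decision that the new lowercase locals feed can be isolated into a few lines. The following is a standalone sketch, not code from the sample: the helper name compute_new_node_count is invented, the Bigtable client calls are dropped, and it uses size_change_step in both directions even though the sample still hard-codes + 3 on the way up.

def compute_new_node_count(current_node_count, scale_up,
                           min_node_count=3, max_node_count=30,
                           size_change_step=3):
    """Return the node count to resize to, or None if already at the limit."""
    if scale_up:
        if current_node_count < max_node_count:
            # Grow by one step, clamped to the per-zone maximum.
            return min(current_node_count + size_change_step, max_node_count)
    else:
        if current_node_count > min_node_count:
            # Shrink by one step, clamped to the minimum node count.
            return max(current_node_count - size_change_step, min_node_count)
    return None


# A 29-node cluster scaling up is clamped to the 30-node maximum,
# and a 4-node cluster scaling down is clamped to the 3-node minimum.
assert compute_new_node_count(29, scale_up=True) == 30
assert compute_new_node_count(4, scale_up=False) == 3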
bigtable/metricscaler/metricscaler_test.py (4 changes: 2 additions & 2 deletions)

@@ -20,13 +20,13 @@
 from google.cloud import bigtable
 from mock import patch
 
-from metricscaler import _SIZE_CHANGE_STEP
 from metricscaler import get_cpu_load
 from metricscaler import main
 from metricscaler import scale_bigtable
 
 # tests assume instance and cluster have the same ID
 BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER']
+SIZE_CHANGE_STEP = 3
 
 # System tests to verify API calls succeed
 
@@ -50,7 +50,7 @@ def test_scale_bigtable():
     cluster.reload()
 
     new_node_count = cluster.serve_nodes
-    assert (new_node_count == (original_node_count + _SIZE_CHANGE_STEP))
+    assert (new_node_count == (original_node_count + SIZE_CHANGE_STEP))
 
     scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
     time.sleep(3)
bigtable/metricscaler/requirements.txt (2 changes: 1 addition & 1 deletion)

@@ -1,2 +1,2 @@
-google-cloud-bigtable==0.24.0
+google-cloud-bigtable==0.25.0
 google-cloud-monitoring==0.25.0
dataproc/requirements.txt (2 changes: 1 addition & 1 deletion)

@@ -1,2 +1,2 @@
 google-api-python-client==1.6.2
-google-cloud==0.25.0
+google-cloud==0.26.0
iot/api-client/manager/manager.py (2 changes: 1 addition & 1 deletion)

@@ -51,7 +51,7 @@ def create_iot_topic(topic_name):
     topic = pubsub_client.topic(topic_name)
     policy = topic.get_iam_policy()
     publishers = policy.get('roles/pubsub.publisher', [])
-    publishers.append(policy.service_account(
+    publishers.add(policy.service_account(
         'cloud-iot@system.gserviceaccount.com'))
     policy['roles/pubsub.publisher'] = publishers
     topic.set_iam_policy(policy)
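The only code change outside bigtable/ swaps publishers.append(...) for publishers.add(...). That lines up with the google-cloud==0.26.0 bump below: in the newer release the members bound to an IAM role behave like a set rather than a list, so add() is the appropriate call and duplicates collapse automatically (this reading of the change is an inference, not something stated in the PR). Below is a minimal standalone sketch of the updated pattern, with a placeholder topic name and an empty set() default so add() also works when the role has no members yet; the sample itself keeps the original [] default.

from google.cloud import pubsub

pubsub_client = pubsub.Client()
topic = pubsub_client.topic('my-device-events')  # placeholder topic name

# Allow the Cloud IoT service account to publish to the topic.
policy = topic.get_iam_policy()
publishers = policy.get('roles/pubsub.publisher', set())
publishers.add(policy.service_account(
    'cloud-iot@system.gserviceaccount.com'))
policy['roles/pubsub.publisher'] = publishers
topic.set_iam_policy(policy)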
iot/api-client/manager/requirements.txt (2 changes: 1 addition & 1 deletion)

@@ -1,4 +1,4 @@
 google-api-python-client==1.6.2
 google-auth-httplib2==0.0.2
 google-auth==1.0.1
-google-cloud==0.25.0
+google-cloud==0.26.0
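Every requirements.txt touched here only moves a version pin, so picking up the change is just a reinstall from the updated file. A quick way to confirm an environment actually has the new pins afterwards is a throwaway check like the one below (not part of the samples; pkg_resources ships with setuptools, and the expected versions are taken from this diff).

import pkg_resources

EXPECTED = {
    'google-cloud-bigtable': '0.25.0',
    'google-cloud': '0.26.0',
}

for name, wanted in EXPECTED.items():
    installed = pkg_resources.get_distribution(name).version
    # Fail loudly if the environment is still on an old pin.
    assert installed == wanted, '{} is {}, expected {}'.format(
        name, installed, wanted)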