Skip to content

Commit 549bdba

Browse files
committed
Merge remote-tracking branch 'upstream/master'
fix #71
2 parents a7ac307 + da3b823 commit 549bdba

10 files changed

Lines changed: 23 additions & 17 deletions

File tree

docs/doc.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ Deployments are currently only supported for `docker` job types.
9090
|type|true|string||Has to be "docker-registry"|
9191
|host|true|string||Host of the registry. I.e. my-registry.com:5000|
9292
|repository|true|string||Name of the repository|
93-
|tag|false|string|build\_<NUMBER>|The of the image|
93+
|tag|false|string|build\_\<NUMBER>|The tag of the image|
9494
|target|false|string||When building a Dockerfile with multiple build stages `target` can be used to specify an intermediate build stage by name as a final stage for the resulting image which should be deployed|
9595
|username|false|string||Username to be used with the registry|
9696
|password|false|[Secret](#secrets)||Secret containing the password|
@@ -122,7 +122,7 @@ Deployments are currently only supported for `docker` job types.
122122
|type|true|string||Has to be "ecr"|
123123
|host|true|string||ECR endpoint|
124124
|repository|true|string||Name of the repository|
125-
|tag|false|string|build\_<NUMBER>|The of the image|
125+
|tag|false|string|build\_\<NUMBER>|The tag of the image|
126126
|target|false|string||When building a Dockerfile with multiple build stages `target` can be used to specify an intermediate build stage by name as a final stage for the resulting image which should be deployed|
127127
|region|true|string||AWS Region|
128128
|access\_key\_id|true|[Secret](#secrets)||Secret containing the AWS `Access Key ID`|
@@ -153,7 +153,7 @@ Deployments are currently only supported for `docker` job types.
153153
|type|true|string||Has to be "gcr"|
154154
|host|true|string||GCR endpoint i.e. us.gcr.io|
155155
|repository|true|string||Name of the repository|
156-
|tag|false|string|build\_<NUMBER>|The of the image|
156+
|tag|false|string|build\_\<NUMBER>|The tag of the image|
157157
|target|false|string||When building a Dockerfile with multiple build stages `target` can be used to specify an intermediate build stage by name as a final stage for the resulting image which should be deployed|
158158
|region|true|string||AWS Region|
159159
|service\_account|true|[Secret](#secrets)||Secret containing the GCP `Service Account` with role `Storage Admin`|

src/api/handlers/job_api.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#pylint: disable=too-many-lines,too-few-public-methods,too-many-locals,too-many-statements,too-many-branches
22
import os
33
import json
4-
import time
54
import uuid
65
import copy
76
import urllib
@@ -874,9 +873,6 @@ def post(self):
874873
build_arguments, deployments, limits_cpu, limits_memory, timeout,
875874
resources, json.dumps(job), job['cluster']['name']])
876875

877-
# to make sure the get picked up in the right order by the scheduler
878-
time.sleep(0.1)
879-
880876
g.db.commit()
881877
return "Successfully create jobs"
882878

src/api/handlers/projects/build.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ def get(self, project_id, build_id):
124124
''', [build_id, project_id])
125125

126126
for j in jobs:
127-
key = 'project_%s_branch_%s_job_%s.tar.gz' % (project_id, j['branch'], j['name'])
127+
key = 'project_%s_job_%s.tar.snappy' % (project_id, j['name'])
128128
storage.delete_cache(key)
129129

130130
return OK('Cleared cache')

src/api/handlers/projects/jobs.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -555,7 +555,7 @@ def get(self, project_id, job_id):
555555
if not job:
556556
abort(404)
557557

558-
key = 'project_%s_branch_%s_job_%s.tar.gz' % (project_id, job['branch'], job['name'])
558+
key = 'project_%s_job_%s.tar.snappy' % (project_id, job['name'])
559559
storage.delete_cache(key)
560560

561561
return OK('Cleared cache')

src/github/trigger/trigger.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,11 @@
1111
from pyinfraboxutils.ibbottle import InfraBoxPostgresPlugin
1212
from pyinfraboxutils.db import connect_db
1313

14+
import bottle
1415
from bottle import post, run, request, response, install, get
1516

17+
bottle.BaseRequest.MEMFILE_MAX = 10 * 1024 * 1024
18+
1619
logger = get_logger("github")
1720

1821
def res(status, message):

src/job/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
FROM docker:17.12.1-ce-dind
22

3-
ENV CLOUD_SDK_VERSION 198.0.0
3+
ENV CLOUD_SDK_VERSION 210.0.0
44
ENV PATH /google-cloud-sdk/bin:$PATH
55

66
RUN apk add --no-cache \

src/job/entrypoint.sh

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ if [ ! -e /var/run/docker.sock ]; then
1313
echo "Waiting for docker daemon to start up"
1414

1515
# Start docker daemon
16-
dockerd-entrypoint.sh --storage-driver overlay --data-root /data/docker &
16+
nohup dockerd-entrypoint.sh --storage-driver overlay --data-root /data/docker > /tmp/dockerd.log &
1717

1818
# Wait until daemon is ready
1919
COUNTER=0
@@ -23,6 +23,7 @@ if [ ! -e /var/run/docker.sock ]; then
2323

2424
if [ $COUNTER -gt 60 ]; then
2525
echo "Docker daemon not started" > '/dev/termination-log'
26+
cat /tmp/dockerd.log >> /dev/termination-log
2627
exit 1
2728
fi
2829
done

src/job/infrabox_job/job.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -94,9 +94,14 @@ def create_jobs(self, jobs):
9494
}
9595

9696
while True:
97-
r = requests.post("%s/create_jobs" % self.api_server,
98-
headers=self.get_headers(),
99-
json=payload, timeout=60, verify=self.verify)
97+
try:
98+
r = requests.post("%s/create_jobs" % self.api_server,
99+
headers=self.get_headers(),
100+
json=payload, timeout=300, verify=self.verify)
101+
except:
102+
self.console.collect('Failed to connect to API, retrying.', show=True)
103+
time.sleep(3)
104+
continue
100105

101106
if r.status_code == 200:
102107
return

src/job/job.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -286,6 +286,9 @@ def check_container_crashed(self):
286286

287287
def main(self):
288288
self.load_data()
289+
# Date
290+
self.console.collect("Date:\n", show=True)
291+
self.console.execute(['date'], show=True, show_cmd=False)
289292

290293
# Show environment
291294
self.console.collect("Environment:\n", show=True)
@@ -503,7 +506,7 @@ def main_run_job(self):
503506
if os.path.isfile(storage_cache_tar):
504507
c.collect("Unpacking cache", show=True)
505508
try:
506-
c.execute(['time', 'tar', '-zxf', storage_cache_tar, '-C', self.infrabox_cache_dir], show=True)
509+
self.uncompress(storage_cache_tar, self.infrabox_cache_dir)
507510
except:
508511
c.collect("Failed to unpack cache\n", show=True)
509512
os.remove(storage_cache_tar)

src/pyinfraboxutils/ibflask.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,6 @@ def decorated_function(*args, **kwargs):
186186

187187
job_state = r[0]
188188
if job_state not in ('queued', 'running', 'scheduled'):
189-
logger.warn('job not in an active state')
190189
abort(401, 'Unauthorized')
191190

192191

@@ -213,7 +212,6 @@ def validate_job_token(token):
213212

214213
job_state = r[0]
215214
if job_state not in ('queued', 'running', 'scheduled'):
216-
logger.warn('job not in an active state')
217215
abort(401, 'Unauthorized')
218216

219217

0 commit comments

Comments (0)