|
1 | 1 | """Endpoints relating to task/job status, etc."""
|
2 | 2 |
|
3 | 3 | import structlog
|
4 |
| - |
| 4 | +from django.core.cache import cache |
5 | 5 | from django.urls import reverse
|
6 |
| -from redis import ConnectionError |
7 | 6 | from rest_framework import decorators, permissions
|
8 | 7 | from rest_framework.renderers import JSONRenderer
|
9 | 8 | from rest_framework.response import Response
|
10 | 9 |
|
11 |
| -from readthedocs.core.utils.tasks import TaskNoPermission, get_public_task_data |
12 | 10 | from readthedocs.oauth import tasks
|
13 | 11 |
|
14 |
| - |
15 | 12 | log = structlog.get_logger(__name__)
|
16 | 13 |
|
17 |
| -SUCCESS_STATES = ('SUCCESS',) |
18 |
| -FAILURE_STATES = ( |
19 |
| - 'FAILURE', |
20 |
| - 'REVOKED', |
21 |
| -) |
22 |
| -FINISHED_STATES = SUCCESS_STATES + FAILURE_STATES |
23 |
| -STARTED_STATES = ('RECEIVED', 'STARTED', 'RETRY') + FINISHED_STATES |
24 |
| - |
25 |
| - |
26 |
| -def get_status_data(task_name, state, data, error=None): |
27 |
| - data = { |
28 |
| - 'name': task_name, |
29 |
| - 'data': data, |
30 |
| - 'started': state in STARTED_STATES, |
31 |
| - 'finished': state in FINISHED_STATES, |
32 |
| - # When an exception is raised inside the task, we keep this as SUCCESS |
33 |
| - # and add the exception message into the 'error' key |
34 |
| - 'success': state in SUCCESS_STATES and error is None, |
35 |
| - } |
36 |
| - if error is not None: |
37 |
| - data['error'] = error |
38 |
| - return data |
39 |
| - |
40 | 14 |
|
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def job_status(request, task_id):
    """
    Retrieve Celery task function state from frontend.

    Returns a JSON payload describing the (approximate) state of the task
    identified by ``task_id``. No real Celery result backend is consulted;
    the response is synthesized from a per-task poll counter kept in the
    Django cache.

    :param request: incoming DRF request (unused beyond routing/permissions).
    :param task_id: Celery task id; used as the cache key for poll counting.
    """
    # HACK: always poll up to N times and after that return the sync has
    # finished. This is a way to avoid re-enabling Celery result backend for now.
    # TODO remove this API and RemoteRepo sync UI when we have better auto syncing
    poll_n = cache.get(task_id, 0)
    poll_n += 1
    # Keep the counter for 5 minutes so repeated polls accumulate.
    cache.set(task_id, poll_n, 5 * 60)
    # Use >= (not ==) so the "finished" state is sticky: with ==, a 6th poll
    # would flip back to unfinished and the frontend would keep polling until
    # the cache entry expired.
    finished = poll_n >= 5

    data = {
        "name": "sync_remote_repositories",
        "data": {},
        "started": True,
        "finished": finished,
        "success": finished,
    }
    return Response(data)
53 | 36 |
|
54 | 37 |
|
55 | 38 | @decorators.api_view(['POST'])
|
56 | 39 | @decorators.permission_classes((permissions.IsAuthenticated,))
|
57 | 40 | @decorators.renderer_classes((JSONRenderer,))
|
58 | 41 | def sync_remote_repositories(request):
|
| 42 | + """Trigger a re-sync of remote repositories for the user.""" |
59 | 43 | result = tasks.sync_remote_repositories.delay(user_id=request.user.id,)
|
60 | 44 | task_id = result.task_id
|
61 | 45 | return Response({
|
|
0 commit comments