
Commit 67bf4db

[api] Api routes to manage repositories
List a project's repositories:
    GET /ecosystems/<name>/projects/<name>/repos/

Create a dataset (also creates the repository if it does not exist):
    POST /ecosystems/<name>/projects/<name>/repos/
    data = { uri, datasource_type, category, scheduler }

Get a single repository:
    GET /ecosystems/<name>/projects/<name>/repos/<uuid>/

Get a dataset from a repository:
    GET /ecosystems/<name>/projects/<name>/repos/<uuid>/categories/<category>

Delete a dataset (also cancels its task):
    DELETE /ecosystems/<name>/projects/<name>/repos/<uuid>/categories/<category>

List a project's children (subprojects and repositories):
    GET /ecosystems/<name>/projects/<name>/children/

Signed-off-by: Eva Millán <[email protected]>
1 parent 1f30e6a commit 67bf4db
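
Not part of the commit, but as a rough sketch of how a client could exercise these routes (the base URL, the bearer-token header, and all example values are assumptions; the paths follow datasources/urls.py below):

    import requests

    BASE = "http://localhost:8000/api/v1"            # assumed deployment URL
    HEADERS = {"Authorization": "Bearer <token>"}    # JWT from the /token/ endpoint, if auth is enabled

    eco, proj = "chaoss", "grimoirelab"              # illustrative ecosystem and project names

    # Create a dataset (creates the repository too if it does not exist yet)
    created = requests.post(
        f"{BASE}/ecosystems/{eco}/projects/{proj}/repos/",
        json={
            "uri": "https://github.com/chaoss/grimoirelab",
            "datasource_type": "git",
            "category": "commit",
            "scheduler": {"job_interval": 86400, "job_max_retries": 3},   # optional overrides
        },
        headers=HEADERS,
    ).json()

    # List the project's repositories
    repos = requests.get(f"{BASE}/ecosystems/{eco}/projects/{proj}/repos/",
                         headers=HEADERS).json()

    # Fetch a single dataset, then delete it (deleting also cancels its eventizer task)
    dataset_url = (f"{BASE}/ecosystems/{eco}/projects/{proj}/repos/"
                   f"{created['uuid']}/categories/commit/")
    requests.get(dataset_url, headers=HEADERS)
    requests.delete(dataset_url, headers=HEADERS)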

File tree

8 files changed: +1242, -338 lines


docs/openapi.yml

Lines changed: 337 additions & 75 deletions
Large diffs are not rendered by default.

src/grimoirelab/core/app/urls.py

Lines changed: 1 addition & 2 deletions
@@ -11,14 +11,13 @@
 from ..views import api_login

 from grimoirelab.core.scheduler.urls import urlpatterns as sched_urlpatterns
-from grimoirelab.core.datasources.urls import datasources_urlpatterns, ecosystems_urlpatterns
+from grimoirelab.core.datasources.urls import ecosystems_urlpatterns

 urlpatterns = [
     path("login", api_login, name="api_login"),
     path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
     path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
     path("scheduler/", include(sched_urlpatterns)),
-    path("datasources/", include(datasources_urlpatterns)),
     path("api/v1/", include([
         path("ecosystems/", include(ecosystems_urlpatterns))
     ])),

src/grimoirelab/core/datasources/api.py

Lines changed: 260 additions & 32 deletions
@@ -16,25 +16,33 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #

+import itertools
+
 from rest_framework import (
     generics,
     pagination,
     response,
     serializers,
+    status,
 )
 from drf_spectacular.utils import (
     extend_schema,
     extend_schema_view,
+    extend_schema_serializer,
     OpenApiParameter)
 from drf_spectacular.types import OpenApiTypes
 from django.db.models import Q
+from django.conf import settings
 from django.shortcuts import get_object_or_404

 from .models import (
+    DataSet,
     Repository,
     Ecosystem,
     Project)
-from ..scheduler.api import EventizerTaskListSerializer
+from .utils import generate_uuid
+from ..scheduler.api import EventizerTaskSerializer
+from ..scheduler.scheduler import schedule_task, cancel_task


 class DataSourcesPaginator(pagination.PageNumberPagination):
@@ -55,24 +63,15 @@ def get_paginated_response(self, data):
         })


-class EventizerRepositoryListSerializer(serializers.ModelSerializer):
-    task = EventizerTaskListSerializer()
-
-    class Meta:
-        model = Repository
-        fields = [
-            'uri', 'datasource_type', 'datasource_category', 'task',
-        ]
-
-
 class ProjectSerializer(serializers.ModelSerializer):
     subprojects = serializers.SlugRelatedField(many=True,
                                                read_only=True,
                                                slug_field='name')
+    repos = serializers.SerializerMethodField()

     class Meta:
         model = Project
-        fields = ['id', 'name', 'title', 'parent_project', 'subprojects']
+        fields = ['id', 'name', 'title', 'parent_project', 'subprojects', 'repos']
         lookup_field = 'name'

     def validate_name(self, value,):
@@ -82,6 +81,9 @@ def validate_name(self, value,):

         return value

+    def get_repos(self, obj):
+        return Repository.objects.filter(dataset__project=obj).distinct().values('uuid')
+

 class ParentProjectField(serializers.Field):
     def to_representation(self, value):
@@ -99,26 +101,6 @@ class ProjectDetailSerializer(ProjectSerializer):
     subprojects = ProjectSerializer(many=True, read_only=True)


-class RepositoryList(generics.ListAPIView):
-    serializer_class = EventizerRepositoryListSerializer
-    pagination_class = DataSourcesPaginator
-
-    def get_queryset(self):
-        datasource = self.request.query_params.get('datasource')
-        category = self.request.query_params.get('category')
-        uri = self.request.query_params.get('uri')
-
-        queryset = Repository.objects.select_related('task')
-        if datasource is not None:
-            queryset = queryset.filter(datasource_type=datasource)
-        if category is not None:
-            queryset = queryset.filter(datasource_category=category)
-        if uri is not None:
-            queryset = queryset.filter(uri=uri)
-
-        return queryset
-
-
 class EcosystemSerializer(serializers.ModelSerializer):
     class Meta:
         model = Ecosystem
@@ -187,3 +169,249 @@ def get_queryset(self):
         queryset = Project.objects.filter(ecosystem__name=ecosystem_name)

         return queryset
+
+
+class CategorySerializer(serializers.ModelSerializer):
+    task = EventizerTaskSerializer(read_only=True)
+
+    class Meta:
+        model = DataSet
+        fields = ['id', 'category', 'task']
+
+
+class RepoSerializer(serializers.ModelSerializer):
+    categories = serializers.SlugRelatedField(source='dataset_set',
+                                              many=True,
+                                              read_only=True,
+                                              slug_field='category')
+
+    class Meta:
+        model = Repository
+        fields = ['uuid', 'uri', 'datasource_type', 'categories']
+
+
+class RepoDetailSerializer(RepoSerializer):
+    categories = serializers.SerializerMethodField(read_only=True, method_name='get_categories')
+
+    class Meta:
+        model = Repository
+        fields = ['uuid', 'uri', 'datasource_type', 'categories']
+
+    def get_categories(self, obj):
+        serializer = CategorySerializer(instance=obj.dataset_set.all(), many=True)
+        return serializer.data
+
+
+@extend_schema_serializer(exclude_fields=('project__id'))
+class CreateRepoSerializer(serializers.Serializer):
+    uri = serializers.CharField()
+    datasource_type = serializers.CharField()
+    category = serializers.CharField()
+    project__id = serializers.CharField()
+    scheduler = serializers.JSONField(required=False)
+
+    def validate(self, attrs):
+        try:
+            Repository.objects.get(uri=attrs['uri'],
+                                   dataset__project__id=attrs['project__id'],
+                                   dataset__category=attrs['category'])
+        except Repository.DoesNotExist:
+            pass
+        else:
+            msg = f"Repository '{attrs['uri']}' with category '{attrs['category']}' already exists in project."
+            raise serializers.ValidationError(msg)
+
+        return attrs
+
+
+@extend_schema_view(get=extend_schema(
+    parameters=[
+        OpenApiParameter('datasource_type', OpenApiTypes.STR, OpenApiParameter.QUERY),
+        OpenApiParameter('category', OpenApiTypes.STR, OpenApiParameter.QUERY),
+        OpenApiParameter('uri', OpenApiTypes.STR, OpenApiParameter.QUERY)]
+))
+@extend_schema(request=CreateRepoSerializer)
+class RepoList(generics.ListCreateAPIView):
+    serializer_class = RepoDetailSerializer
+    pagination_class = DataSourcesPaginator
+    model = Repository
+
+    def get_queryset(self):
+        project = get_object_or_404(Project,
+                                    name=self.kwargs.get('project_name'),
+                                    ecosystem__name=self.kwargs.get('ecosystem_name'))
+        queryset = Repository.objects.filter(dataset__project=project).distinct()
+
+        datasource = self.request.query_params.get('datasource_type')
+        category = self.request.query_params.get('category')
+        uri = self.request.query_params.get('uri')
+
+        if datasource is not None:
+            queryset = queryset.filter(datasource_type=datasource)
+        if category is not None:
+            queryset = queryset.filter(dataset__category=category).distinct()
+        if uri is not None:
+            queryset = queryset.filter(uri=uri)
+
+        return queryset
+
+    def create(self, request, *args, **kwargs):
+        # Get project from URL params
+        project = get_object_or_404(Project,
+                                    name=self.kwargs.get('project_name'),
+                                    ecosystem__name=self.kwargs.get('ecosystem_name'))
+        request.data['project__id'] = project.id
+
+        # Validate request data
+        serializer = CreateRepoSerializer(data=request.data)
+        if serializer.is_valid():
+            # Create repository if it does not exist yet
+            uuid = generate_uuid(str(request.data['uri']), str(request.data['datasource_type']))
+            repository, _ = Repository.objects.get_or_create(uri=request.data['uri'],
+                                                             datasource_type=request.data['datasource_type'],
+                                                             uuid=uuid)
+            # Create data set
+            dataset = DataSet.objects.create(project=project,
+                                             repository=repository,
+                                             category=request.data['category'])
+
+            # Create task
+            job_interval = settings.GRIMOIRELAB_JOB_INTERVAL
+            job_max_retries = settings.GRIMOIRELAB_JOB_MAX_RETRIES
+            if 'scheduler' in request.data:
+                job_interval = request.data['scheduler'].get('job_interval', job_interval)
+                job_max_retries = request.data['scheduler'].get('job_max_retries', job_max_retries)
+
+            task_args = {
+                'uri': request.data['uri']
+            }
+            task = schedule_task(
+                'eventizer', task_args,
+                datasource_type=request.data['datasource_type'],
+                datasource_category=request.data['category'],
+                job_interval=job_interval,
+                job_max_retries=job_max_retries
+            )
+            dataset.task = task
+            dataset.save()
+            response_serializer = self.get_serializer(repository)
+
+            return response.Response(response_serializer.data, status=status.HTTP_201_CREATED)
+        return response.Response(serializer.errors, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
+
+
+class RepoDetail(generics.RetrieveAPIView):
+    serializer_class = RepoDetailSerializer
+    model = Repository
+    lookup_field = 'uuid'
+
+    def get_queryset(self):
+        project = get_object_or_404(Project,
+                                    name=self.kwargs.get('project_name'),
+                                    ecosystem__name=self.kwargs.get('ecosystem_name'))
+        queryset = Repository.objects.filter(dataset__project=project).distinct()
+
+        return queryset
+
+
+class CategoryDetail(generics.RetrieveDestroyAPIView):
+    serializer_class = CategorySerializer
+    model = DataSet
+    lookup_field = 'category'
+
+    def get_queryset(self):
+        project = get_object_or_404(Project,
+                                    name=self.kwargs.get('project_name'),
+                                    ecosystem__name=self.kwargs.get('ecosystem_name'))
+        repo = get_object_or_404(Repository, uuid=self.kwargs.get('uuid'))
+        queryset = DataSet.objects.filter(project=project, repository=repo)
+
+        return queryset
+
+    def destroy(self, request, *args, **kwargs):
+        project = get_object_or_404(Project,
+                                    name=self.kwargs.get('project_name'),
+                                    ecosystem__name=self.kwargs.get('ecosystem_name'))
+        repo = get_object_or_404(Repository, uuid=self.kwargs.get('uuid'))
+        dataset = get_object_or_404(DataSet,
+                                    category=self.kwargs.get('category'),
+                                    repository=repo,
+                                    project=project)
+
+        # Cancel related task
+        if dataset.task:
+            cancel_task(dataset.task.uuid)
+
+        # Delete data set
+        dataset.delete()
+        dataset.repository.save()
+
+        # Check if the related repository has no data set associated
+        if not dataset.repository.dataset_set.exists():
+            dataset.repository.delete()
+
+        return response.Response(status=status.HTTP_204_NO_CONTENT)
+
+
+class ProjectChildSerializer(serializers.ModelSerializer):
+    """
+    Returns different fields for a project or a repository.
+    """
+    type = serializers.CharField()
+    name = serializers.CharField(required=False)
+    title = serializers.CharField(required=False)
+    uri = serializers.CharField(required=False)
+    subprojects = serializers.IntegerField(required=False)
+    repos = serializers.IntegerField(required=False)
+    categories = serializers.IntegerField(required=False)
+
+    class Meta:
+        model = Project
+        fields = ['type', 'name', 'title', 'uri', 'subprojects', 'repos', 'categories']
+
+    def to_representation(self, instance):
+        representation = {
+            'id': instance.id
+        }
+        if hasattr(instance, 'name'):
+            # Return project data
+            representation['type'] = 'project'
+            representation['name'] = instance.name
+            representation['title'] = instance.title
+            representation['subprojects'] = instance.subprojects.count()
+            representation['repos'] = Repository.objects.filter(dataset__project=instance).distinct().count()
+        else:
+            # Return repository data
+            representation['type'] = 'repository'
+            representation['uri'] = instance.uri
+            representation['categories'] = instance.dataset_set.count()
+
+        return representation
+
+
+@extend_schema_view(get=extend_schema(
+    parameters=[OpenApiParameter('term', OpenApiTypes.STR, OpenApiParameter.QUERY)]
+))
+class ProjectChildrenList(generics.ListAPIView):
+    """
+    Returns a paginated list of a project's descendants (repositories and subprojects).
+    """
+    serializer_class = ProjectChildSerializer
+    pagination_class = DataSourcesPaginator
+
+    def get_queryset(self):
+        project = get_object_or_404(Project,
+                                    name=self.kwargs.get('project_name'),
+                                    ecosystem__name=self.kwargs.get('ecosystem_name'))
+        project_queryset = Project.objects.filter(parent_project=project)
+        repo_queryset = Repository.objects.filter(dataset__project=project).distinct()
+
+        term = self.request.query_params.get('term')
+        if term is not None:
+            project_queryset = project_queryset.filter(Q(name__icontains=term) |
+                                                       Q(title__icontains=term))
+            repo_queryset = repo_queryset.filter(uri__icontains=term)
+
+        queryset = list(itertools.chain(project_queryset, repo_queryset))
+
+        return queryset
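
For orientation, a sketch of the kind of items ProjectChildrenList returns, following ProjectChildSerializer.to_representation() above (the concrete values are invented):

    # Illustrative page contents for GET .../projects/<name>/children/
    # (field names follow ProjectChildSerializer.to_representation(); values are made up)
    children = [
        {
            "id": 2,
            "type": "project",       # a subproject of the requested project
            "name": "perceval",
            "title": "Perceval",
            "subprojects": 0,        # count of its own subprojects
            "repos": 3,              # distinct repositories reached through its datasets
        },
        {
            "id": 7,
            "type": "repository",    # a repository linked to the project via a DataSet
            "uri": "https://github.com/chaoss/grimoirelab-perceval",
            "categories": 2,         # number of datasets (categories) for this repository
        },
    ]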

src/grimoirelab/core/datasources/urls.py

Lines changed: 7 additions & 7 deletions
@@ -16,20 +16,20 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #

-from django.urls import path, re_path
+from django.urls import path

 from . import api
-from . import views


-datasources_urlpatterns = [
-    re_path(r'^add_repository', views.add_repository, name='add_repository'),
-    path('repositories/', api.RepositoryList.as_view()),
-]
-
 ecosystems_urlpatterns = [
     path('', api.EcosystemList.as_view(), name='ecosystem-list'),
     path('<str:name>/', api.EcosystemDetail.as_view(), name='ecosystem-detail'),
     path('<str:ecosystem_name>/projects/', api.ProjectList.as_view(), name='projects-list'),
     path('<str:ecosystem_name>/projects/<str:name>', api.ProjectDetail.as_view(), name='projects-detail'),
+    path('<str:ecosystem_name>/projects/<str:project_name>/children/', api.ProjectChildrenList.as_view(), name='children-list'),
+    path('<str:ecosystem_name>/projects/<str:project_name>/repos/', api.RepoList.as_view(), name='repo-list'),
+    path('<str:ecosystem_name>/projects/<str:project_name>/repos/<str:uuid>/', api.RepoDetail.as_view(), name='repo-detail'),
+    path('<str:ecosystem_name>/projects/<str:project_name>/repos/<str:uuid>/categories/<str:category>/',
+         api.CategoryDetail.as_view(),
+         name='category-detail'),
 ]
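
Since each new route is named, other parts of the code base can build these URLs with Django's reverse() instead of hard-coding paths. A minimal sketch (the ecosystem, project, and uuid values are made up; it assumes these patterns are mounted under api/v1/ecosystems/ as in app/urls.py above):

    from django.urls import reverse

    # Resolve the detail URL for a repository by route name; the kwargs match
    # the path converters declared in ecosystems_urlpatterns.
    url = reverse('repo-detail', kwargs={
        'ecosystem_name': 'chaoss',        # illustrative ecosystem
        'project_name': 'grimoirelab',     # illustrative project
        'uuid': '1a2b3c',                  # illustrative repository uuid
    })
    # With the includes from app/urls.py this should yield something like:
    # /api/v1/ecosystems/chaoss/projects/grimoirelab/repos/1a2b3c/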
