Skip to content

Commit

Permalink
app/crud/contents_crud.py app/crud/files_crud.py
Browse files Browse the repository at this point in the history
  • Loading branch information
AthulyaMS committed Jan 10, 2024
2 parents 047b769 + bf056e0 commit c33f6ad
Show file tree
Hide file tree
Showing 17 changed files with 96 additions and 6,168 deletions.
8 changes: 0 additions & 8 deletions app/crud/contents_crud.py
Original file line number Diff line number Diff line change
Expand Up @@ -953,7 +953,6 @@ def bible_verse_type_check(content, model_cls_2, book, db_content2, chapter_numb
merged_verse_pattern = re.compile(r'(\d+)-(\d+)$')
metadata_field = {"publishedVersification":[]}
#NormalVerseNumber Pattern
# print("CONTENT",content)
if normal_verse_pattern.match(str(content['verseNumber'])):
row_other = model_cls_2(
book_id = book.bookId,
Expand Down Expand Up @@ -1046,21 +1045,15 @@ def upload_bible_books(db_: Session, resource_name, books, user_id=None): # pyl
raise TypeException("JSON is not of the required format..")
try:
chapter_number = int(chapter['number'])
# print("CHAPTERNUM", chapter_number)
except ValueError as exe:
raise TypeException("JSON is not of the required format. Chapter number should be an integer.") from exe
# Iterate over the content of the chapter
for content in item.JSON["content"]:
if isinstance(content, dict) and content.get("type") == "verse:v":
# verseNumber = content.get("number", "")
# print("VERSE" ,content)
verseNumber = content.get("number", "")
# verseText = content.get("content", "")
# print("VERSENUM", verseNumber)
next_index = item.JSON["content"].index(content) + 1
if next_index < len(item.JSON["content"]) and isinstance(item.JSON["content"][next_index], str):
verseText = item.JSON["content"][next_index]
# print("VERSETEXT", verseText)
if verseNumber:
if verseText is None:
raise TypeException("JSON is not of the required format. verseText not found")
Expand Down Expand Up @@ -1201,7 +1194,6 @@ def update_bible_books_cleaned(db_,resource_name,books,resource_db_content,user_
# Include verse and verse text in content
content["verseNumber"] = verseNumber
content["verseText"] = verseText
# print("CONTENT",content)
# Call your function to process the verse
db_content2, split_indexs = \
bible_verse_type_check(content, model_cls_2, book, db_content2, chapter_number, split_indexs)
Expand Down
21 changes: 0 additions & 21 deletions app/crud/files_crud.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,77 +124,56 @@ def parse_with_usfm_grammar(input_usfm, output_format=usfm_grammar.Format.JSON,c
excluded_markers.extend(Filter.BCV.value)
if "COMMENTS" in exclude_markers:
excluded_markers.extend(Filter.COMMENTS.value)
# print(f"Excluding all markers in Filter.COMMENTS: {excluded_markers}")
if "TITLES" in exclude_markers:
excluded_markers.extend(Filter.TITLES.value)
# print(f"Excluding all markers in Filter.TITLES: {excluded_markers}")
if "BOOK_HEADERS" in exclude_markers:
excluded_markers.extend(Filter.BOOK_HEADERS.value)
# print(f"Excluding all markers in Filter.BOOK_HEADERS: {excluded_markers}")
if "NOTES" in exclude_markers:
excluded_markers.extend(Filter.NOTES.value)
# print(f"Excluding all markers in Filter.NOTES: {excluded_markers}")
if "STUDY_BIBLE" in exclude_markers:
excluded_markers.extend(Filter.STUDY_BIBLE.value)
# print(f"Excluding all markers in Filter.STUDY_BIBLE: {excluded_markers}")
else:
for marker in exclude_markers:
if marker in Filter.PARAGRAPHS.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
if marker in Filter.BCV.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
if marker in Filter.COMMENTS.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
if marker in Filter.TITLES.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
if marker in Filter.CHARACTERS.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
if marker in Filter.BOOK_HEADERS.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
if marker in Filter.NOTES.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
if marker in Filter.STUDY_BIBLE.value:
excluded_markers.append(marker)
# print(f"Excluding marker: {marker}")
output_content = None
match output_format:
case usfm_grammar.Format.JSON:
# if output_format == usfm_grammar.Format.JSON:
output_content = usfm_parser.to_usj( )
if excluded_markers:
output_content = usfm_parser.to_usj(exclude_markers=excluded_markers)
# print("Generating JSON output with excluded markers.")
elif included_markers:
output_content = usfm_parser.to_usj(include_markers=included_markers)
# print("Generating JSON output with included markers.")
else:
print("No markers specified for JSON output.")
if chapter is not None:
output_content = extract_dict_chapter(output_content, chapter)
case usfm_grammar.Format.CSV:
output_content = usfm_parser.to_list( )
if excluded_markers:
output_content = usfm_parser.to_usj(exclude_markers=excluded_markers)
# print("Generating list output with excluded markers.")
elif included_markers:
output_content = usfm_parser.to_usj(include_markers=included_markers)
# print("Generating list output with included markers.")
else:
print("No markers specified for JSON output.")
if chapter is not None:
output_content = extract_list_chapter(output_content, chapter)
output_content = "\n".join(['\t'.join(list(row)) for row in output_content])
case usfm_grammar.Format.ST:
output_content = usfm_parser.to_syntax_tree()
if chapter is not None:
print("Not implemented chapter extractor for syntax_tree")
case usfm_grammar.Format.USX:
output_content = usfm_parser.to_usx()
if chapter is not None:
Expand Down
21 changes: 11 additions & 10 deletions app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
import db_models
from database import engine
from dependencies import get_db, log
from routers import content_apis, translation_apis, auth_api, media_api, filehandling_apis
from routers import content_apis, auth_api, media_api, filehandling_apis
from auth.authentication import create_super_user
# pylint: enable=E0401

Expand All @@ -28,9 +28,14 @@

create_super_user()

root_url = os.getenv("VACHAN_DOMAIN", 'http://localhost:8000')
if root_url is not None and not root_url.startswith("http://"):
root_url = "http://" + root_url

app = FastAPI(title="Vachan-API", version="2.0.0",
description="The server application that provides APIs to interact \
with the underlying Databases and modules in Vachan-Engine.")
description=f"The server application that provides APIs to interact \
with the underlying Databases and modules in Vachan-Engine. \
\n • For Vachan-TBT docs: {root_url}/v2/text/translate/token-based/docs")
template = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")

Expand Down Expand Up @@ -214,9 +219,6 @@ def test(request: Request,db_: Session = Depends(get_db)):
'''Tests if app is running and the DB connection is active
* Also displays API documentation page upon successful connection on root endpoint'''
db_.query(db_models.Language).first()
root_url = os.getenv("VACHAN_DOMAIN")
if root_url is not None and not root_url.startswith("http://"):
root_url = "http://" + root_url
return template.TemplateResponse(
"landing_page.html",
{
Expand All @@ -227,15 +229,13 @@ def test(request: Request,db_: Session = Depends(get_db)):

app.include_router(auth_api.router)
app.include_router(content_apis.router)
app.include_router(translation_apis.router)
app.include_router(media_api.router)
app.include_router(filehandling_apis.router)

beta_endpoints = [
"/graphql", # Specify the paths of the beta endpoints
"/v2/resources/bibles/{resource_name}/versification",
"/v2/resources/bibles/{resource_name}/books/{book_code}/export/{output_format}",
"/v2/text/translate/token-based/project/versification",
"/v2/media/gitlab/stream",
"/v2/media/gitlab/download",
"/v2/files/usfm/to/{output_format}"
Expand All @@ -244,8 +244,9 @@ def test(request: Request,db_: Session = Depends(get_db)):
def custom_openapi():
'''Modify the auto generated openapi schema for API docs'''
openapi_schema = get_openapi(title="Vachan-API", version="2.0.0",
description="The server application that provides APIs to interact \
with the underlying Databases and modules in Vachan-Engine.",
description=f"The server application that provides APIs to interact \
with the underlying Databases and modules in Vachan-Engine. \
<br> • <a href=\"{root_url}/v2/text/translate/token-based/docs\" > Vachan-TBT docs </a>",
routes=app.routes)

# Add version information to specific endpoints
Expand Down
15 changes: 15 additions & 0 deletions app/routers/content_apis.py
Original file line number Diff line number Diff line change
Expand Up @@ -1354,3 +1354,18 @@ async def delete_deleteditems(request: Request,user_details =Depends(get_user_or
log.info('In delete_deleteditems')
deleted_item_count = structurals_crud.cleanup_database(db_=db_)
return {'message': "Database cleanup done!!",'deletedItemCount':deleted_item_count}

@router.get('/v2/jobs', response_model=schemas_nlp.JobStatusResponse,
    response_model_exclude_none=True, status_code=200,
    responses={502: {"model": schemas.ErrorResponse},
    422: {"model": schemas.ErrorResponse},404:{"model": schemas.ErrorResponse}},
    tags=['Jobs'])
@get_auth_access_check_decorator
async def check_job_status(request: Request,
    # FastAPI's `examples` parameter expects a list of example values,
    # not a bare string; use an int example to match the declared type.
    job_id:int=Query(...,examples=[100000]),user_details =Depends(get_user_or_none),
    db_:Session=Depends(get_db)):
    '''Check the status of a background job.

    * job_id: identifier of the job to look up (required query parameter)
    * Returns the job status record produced by nlp_sw_crud.check_job_status
      (shape defined by schemas_nlp.JobStatusResponse)
    * 404 is raised by the CRUD layer when the job id is unknown
      (per the declared responses) -- NOTE(review): confirm against
      nlp_sw_crud.check_job_status, which is not visible here.'''
    log.info('In check_job_status')
    log.debug('job_id:%s', job_id)
    result = nlp_sw_crud.check_job_status(db_, job_id)
    return result
Loading

0 comments on commit c33f6ad

Please sign in to comment.