ElenaRyumina and DmitryRyumin committed
Commit 24a839b
1 Parent(s): 5b88353

- Summary (f6125144e9df803bac37023b6854f03c30d62a31)
- Merge branch 'main' of https://huggingface.co/spaces/ElenaRyumina/OCEANAI (2f050c06a94348ba17660fdf0dd257a7a30f2f9a)
- Summary (8f52d543a1de5a77b59164e6f4f721696fc64299)
- Merge branch 'main' of https://huggingface.co/spaces/ElenaRyumina/OCEANAI (fffa0062f0da04563ce8cd5b8c7e70e3ae783833)
- Summary (1717e06c53138f3eaf52e38f984035d5fbbceac5)
- Merge branch 'main' of https://huggingface.co/spaces/ElenaRyumina/OCEANAI (cd90a1620005039e6998ca3d0ab34fe3bfe1f2dc)
- Summary (98c66eeb6760ebd60d0a4ae4d9d0ac8f896a59df)
- Update config.toml (91d9d7ef4e540930919833c79607c51fc294d60d)
- Merge branch 'main' of https://huggingface.co/spaces/ElenaRyumina/OCEANAI (e6358979bef7df974cdd6c63aca029c57ac62af8)
- Summary (6de61f5802c0e3640047369ad08ecab73b331d34)


Co-authored-by: Dmitry Ryumin <DmitryRyumin@users.noreply.huggingface.co>

README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 😀🤓😎😉😀
 colorFrom: gray
 colorTo: red
 sdk: gradio
-sdk_version: 5.6.0
+sdk_version: 5.7.1
 app_file: app.py
 pinned: false
 license: mit
app.py CHANGED
@@ -93,6 +93,6 @@ if __name__ == "__main__":
 
     create_gradio_app().queue(api_open=False).launch(
         share=False,
-        server_name=None,
-        server_port=None,
+        server_name=config_data.AppSettings_SERVER_NAME,
+        server_port=config_data.AppSettings_PORT,
     )
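
Note: launch() now takes its host and port from config_data instead of hard-coded None values. The loader in app/config.py is not part of this commit, so the sketch below is only an assumption about how the flattened Section_KEY attributes (such as AppSettings_SERVER_NAME) could be built from config.toml; load_config is an illustrative name, not the project's actual API.

    # Illustrative sketch only -- the real app/config.py is not shown in this
    # commit, so load_config and the flattening rule below are assumptions.
    from types import SimpleNamespace

    import toml  # toml==0.10.2 is pinned in requirements.txt


    def load_config(path="config.toml"):
        raw = toml.load(path)  # nested dict, e.g. {"AppSettings": {"SERVER_NAME": ...}}
        flat = {
            f"{section}_{key}": value
            for section, table in raw.items()
            for key, value in table.items()
        }
        return SimpleNamespace(**flat)


    config_data = load_config()
    # config_data.AppSettings_SERVER_NAME == "127.0.0.1"
    # config_data.AppSettings_PORT == 7860
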
app/data_init.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ File: data_init.py
3
+ Author: Elena Ryumina and Dmitry Ryumin
4
+ Description: Data initialization.
5
+ License: MIT License
6
+ """
7
+
8
+ from app.config import config_data
9
+ from app.utils import read_csv_file, extract_profession_weights
10
+
11
+
12
+ df_traits_priority_for_professions = read_csv_file(config_data.Links_PROFESSIONS)
13
+ weights_professions, interactive_professions = extract_profession_weights(
14
+ df_traits_priority_for_professions,
15
+ config_data.Settings_DROPDOWN_CANDIDATES[0],
16
+ )
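
The new module moves the professions CSV read and the default weight extraction to import time, so the table is loaded once and shared. The handler diffs below consume it via plain imports, roughly:

    # Consumers added in this commit (see the diffs below); the CSV behind
    # config_data.Links_PROFESSIONS is read once, when app.data_init is first
    # imported, instead of on every event.
    from app.data_init import (
        df_traits_priority_for_professions,
        weights_professions,
        interactive_professions,
    )
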
app/event_handlers/calculate_practical_tasks.py CHANGED
@@ -16,11 +16,13 @@ from bs4 import BeautifulSoup
 from app.config import config_data
 from app.video_metadata import video_metadata
 from app.mbti_description import MBTI_DESCRIPTION, MBTI_DATA
+from app.data_init import df_traits_priority_for_professions
 from app.utils import (
     read_csv_file,
     apply_rounding_and_rename_columns,
     preprocess_scores_df,
     get_language_settings,
+    extract_profession_weights,
 )
 from app.components import (
     html_message,
@@ -267,6 +269,7 @@ def event_handler_calculate_practical_task_blocks(
 
         df_hidden = df.drop(
             columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS
+            + config_data.Settings_DROPDOWN_MBTI_DEL_COLS
             + config_data.Settings_DROPDOWN_MBTI_DEL_COLS_WEBCAM
         )
 
@@ -350,117 +353,226 @@ def event_handler_calculate_practical_task_blocks(
 
         return existing_tuple[:-1] + person_metadata + existing_tuple[-1:]
     elif practical_subtasks.lower() == "professional groups":
-        sum_weights = sum(
-            [
-                number_openness,
-                number_conscientiousness,
-                number_extraversion,
-                number_agreeableness,
-                number_non_neuroticism,
-            ]
-        )
-
-        if sum_weights != 100:
-            gr.Warning(config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights))
-
-            return (
-                gr.Row(visible=False),
-                gr.Column(visible=False),
-                dataframe(visible=False),
-                files_create_ui(
-                    None,
-                    "single",
-                    [".csv"],
-                    config_data.OtherMessages_EXPORT_PS,
-                    True,
-                    False,
-                    False,
-                    "csv-container",
-                ),
-                gr.Accordion(visible=False),
-                gr.HTML(visible=False),
-                dataframe(visible=False),
-                gr.Column(visible=False),
-                video_create_ui(visible=False),
-                gr.Column(visible=False),
-                gr.Row(visible=False),
-                gr.Row(visible=False),
-                gr.Image(visible=False),
-                textbox_create_ui(visible=False),
-                gr.Row(visible=False),
-                gr.Image(visible=False),
-                textbox_create_ui(visible=False),
-                gr.Row(visible=False),
-                gr.Row(visible=False),
-                gr.Image(visible=False),
-                textbox_create_ui(visible=False),
-                gr.Row(visible=False),
-                gr.Image(visible=False),
-                textbox_create_ui(visible=False),
-                html_message(
-                    config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights),
-                    False,
-                    True,
-                ),
-            )
-        else:
-            b5._candidate_ranking(
-                df_files=pt_scores.iloc[:, 1:],
-                weigths_openness=number_openness,
-                weigths_conscientiousness=number_conscientiousness,
-                weigths_extraversion=number_extraversion,
-                weigths_agreeableness=number_agreeableness,
-                weigths_non_neuroticism=number_non_neuroticism,
-                out=False,
-            )
-
-            df = apply_rounding_and_rename_columns(b5.df_files_ranking_)
-
-            df_hidden = df.drop(columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS)
-
-            df_hidden.to_csv(config_data.Filenames_POTENTIAL_CANDIDATES)
-
-            df_hidden.reset_index(inplace=True)
-
-            person_id = (
-                int(df_hidden.iloc[0][config_data.Dataframes_PT_SCORES[0][0]]) - 1
-            )
-
-            person_metadata = create_person_metadata(person_id, files, video_metadata)
-
-            existing_tuple = (
-                gr.Row(visible=True),
-                gr.Column(visible=True),
-                dataframe(
-                    headers=df_hidden.columns.tolist(),
-                    values=df_hidden.values.tolist(),
-                    visible=True,
-                ),
-                files_create_ui(
-                    config_data.Filenames_POTENTIAL_CANDIDATES,
-                    "single",
-                    [".csv"],
-                    config_data.OtherMessages_EXPORT_PG,
-                    True,
-                    False,
-                    True,
-                    "csv-container",
-                ),
-                gr.Accordion(visible=False),
-                gr.HTML(visible=False),
-                dataframe(visible=False),
-                gr.Column(visible=True),
-                video_create_ui(
-                    value=files[person_id],
-                    file_name=Path(files[person_id]).name,
-                    label="Best Person ID - " + str(person_id + 1),
-                    visible=True,
-                    elem_classes="video-sorted-container",
-                ),
-                html_message(config_data.InformationMessages_NOTI_IN_DEV, False, False),
-            )
-
-            return existing_tuple[:-1] + person_metadata + existing_tuple[-1:]
+        if type_modes == config_data.Settings_TYPE_MODES[0]:
+            sum_weights = sum(
+                [
+                    number_openness,
+                    number_conscientiousness,
+                    number_extraversion,
+                    number_agreeableness,
+                    number_non_neuroticism,
+                ]
+            )
+
+            if sum_weights != 100:
+                gr.Warning(
+                    config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights)
+                )
+
+                return (
+                    gr.Row(visible=False),
+                    gr.Column(visible=False),
+                    dataframe(visible=False),
+                    files_create_ui(
+                        None,
+                        "single",
+                        [".csv"],
+                        config_data.OtherMessages_EXPORT_PS,
+                        True,
+                        False,
+                        False,
+                        "csv-container",
+                    ),
+                    gr.Accordion(visible=False),
+                    gr.HTML(visible=False),
+                    dataframe(visible=False),
+                    gr.Column(visible=False),
+                    video_create_ui(visible=False),
+                    gr.Column(visible=False),
+                    gr.Row(visible=False),
+                    gr.Row(visible=False),
+                    gr.Image(visible=False),
+                    textbox_create_ui(visible=False),
+                    gr.Row(visible=False),
+                    gr.Image(visible=False),
+                    textbox_create_ui(visible=False),
+                    gr.Row(visible=False),
+                    gr.Row(visible=False),
+                    gr.Image(visible=False),
+                    textbox_create_ui(visible=False),
+                    gr.Row(visible=False),
+                    gr.Image(visible=False),
+                    textbox_create_ui(visible=False),
+                    html_message(
+                        config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights),
+                        False,
+                        True,
+                    ),
+                )
+            else:
+                b5._candidate_ranking(
+                    df_files=pt_scores.iloc[:, 1:],
+                    weigths_openness=number_openness,
+                    weigths_conscientiousness=number_conscientiousness,
+                    weigths_extraversion=number_extraversion,
+                    weigths_agreeableness=number_agreeableness,
+                    weigths_non_neuroticism=number_non_neuroticism,
+                    out=False,
+                )
+
+                df = apply_rounding_and_rename_columns(b5.df_files_ranking_)
+
+                df_hidden = df.drop(
+                    columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS
+                )
+
+                df_hidden.to_csv(config_data.Filenames_POTENTIAL_CANDIDATES)
+
+                df_hidden.reset_index(inplace=True)
+
+                person_id = (
+                    int(df_hidden.iloc[0][config_data.Dataframes_PT_SCORES[0][0]]) - 1
+                )
+
+                person_metadata = create_person_metadata(
+                    person_id, files, video_metadata
+                )
+        elif type_modes == config_data.Settings_TYPE_MODES[1]:
+            all_hidden_dfs = []
+
+            for dropdown_candidate in config_data.Settings_DROPDOWN_CANDIDATES[:-1]:
+                weights, _ = extract_profession_weights(
+                    df_traits_priority_for_professions,
+                    dropdown_candidate,
+                )
+
+                sum_weights = sum(weights)
+
+                if sum_weights != 100:
+                    gr.Warning(
+                        config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights)
+                    )
+
+                    return (
+                        gr.Row(visible=False),
+                        gr.Column(visible=False),
+                        dataframe(visible=False),
+                        files_create_ui(
+                            None,
+                            "single",
+                            [".csv"],
+                            config_data.OtherMessages_EXPORT_PS,
+                            True,
+                            False,
+                            False,
+                            "csv-container",
+                        ),
+                        gr.Accordion(visible=False),
+                        gr.HTML(visible=False),
+                        dataframe(visible=False),
+                        gr.Column(visible=False),
+                        video_create_ui(visible=False),
+                        gr.Column(visible=False),
+                        gr.Row(visible=False),
+                        gr.Row(visible=False),
+                        gr.Image(visible=False),
+                        textbox_create_ui(visible=False),
+                        gr.Row(visible=False),
+                        gr.Image(visible=False),
+                        textbox_create_ui(visible=False),
+                        gr.Row(visible=False),
+                        gr.Row(visible=False),
+                        gr.Image(visible=False),
+                        textbox_create_ui(visible=False),
+                        gr.Row(visible=False),
+                        gr.Image(visible=False),
+                        textbox_create_ui(visible=False),
+                        html_message(
+                            config_data.InformationMessages_SUM_WEIGHTS.format(
+                                sum_weights
+                            ),
+                            False,
+                            True,
+                        ),
+                    )
+                else:
+                    b5._candidate_ranking(
+                        df_files=pt_scores.iloc[:, 1:],
+                        weigths_openness=weights[0],
+                        weigths_conscientiousness=weights[1],
+                        weigths_extraversion=weights[2],
+                        weigths_agreeableness=weights[3],
+                        weigths_non_neuroticism=weights[4],
+                        out=False,
+                    )
+
+                    df = apply_rounding_and_rename_columns(b5.df_files_ranking_)
+
+                    df_hidden = df.drop(
+                        columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS
+                        + config_data.Settings_DROPDOWN_MBTI_DEL_COLS_WEBCAM
+                    )
+
+                    df_hidden.insert(0, "Professional Group", dropdown_candidate)
+
+                    all_hidden_dfs.append(df_hidden)
+
+            df_hidden = pd.concat(all_hidden_dfs, ignore_index=True)
+
+            df_hidden.rename(
+                columns={
+                    "Candidate score": "Summary Score",
+                },
+                inplace=True,
+            )
+
+            df_hidden = df_hidden.sort_values(by="Summary Score", ascending=False)
+
+            df_hidden.reset_index(drop=True, inplace=True)
+
+            df_hidden.to_csv(
+                config_data.Filenames_POTENTIAL_CANDIDATES, index=False
+            )
+
+            person_id = 0
+
+            person_metadata = create_person_metadata(
+                person_id, files, video_metadata
+            )
+
+        existing_tuple = (
+            gr.Row(visible=True),
+            gr.Column(visible=True),
+            dataframe(
+                headers=df_hidden.columns.tolist(),
+                values=df_hidden.values.tolist(),
+                visible=True,
+            ),
+            files_create_ui(
+                config_data.Filenames_POTENTIAL_CANDIDATES,
+                "single",
+                [".csv"],
+                config_data.OtherMessages_EXPORT_PG,
+                True,
+                False,
+                True,
+                "csv-container",
+            ),
+            gr.Accordion(visible=False),
+            gr.HTML(visible=False),
+            dataframe(visible=False),
+            gr.Column(visible=True),
+            video_create_ui(
+                value=files[person_id],
+                file_name=Path(files[person_id]).name,
+                label="Best Person ID - " + str(person_id + 1),
+                visible=True,
+                elem_classes="video-sorted-container",
+            ),
+            html_message(config_data.InformationMessages_NOTI_IN_DEV, False, False),
+        )
+
+        return existing_tuple[:-1] + person_metadata + existing_tuple[-1:]
     elif practical_subtasks.lower() == "professional skills":
         df_professional_skills = read_csv_file(config_data.Links_PROFESSIONAL_SKILLS)
 
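
For the Web mode added above, the handler ranks the same candidates once per professional group, tags each ranking with its group, concatenates the results, renames Candidate score to Summary Score and sorts by it. A standalone sketch of that aggregation with dummy data follows; the real per-group scores come from oceanai's b5._candidate_ranking, for which a plain weighted sum stands in here, and the trait columns and group weights are made up for illustration.

    # Standalone sketch of the Web-mode aggregation (dummy data; a weighted sum
    # replaces oceanai's b5._candidate_ranking, which produces the real scores).
    import pandas as pd

    traits = ["OPE", "CON", "EXT", "AGR", "NNEU"]  # hypothetical trait columns
    pt_scores = pd.DataFrame(
        [[0.62, 0.55, 0.47, 0.60, 0.51], [0.48, 0.71, 0.58, 0.44, 0.66]],
        columns=traits,
    )
    profession_weights = {  # hypothetical per-group weights, each summing to 100
        "Group A": [30, 30, 20, 10, 10],
        "Group B": [10, 40, 10, 10, 30],
    }

    all_hidden_dfs = []
    for group, weights in profession_weights.items():
        ranked = pt_scores.copy()
        ranked["Candidate score"] = (pt_scores[traits] * weights).sum(axis=1) / 100
        ranked.insert(0, "Professional Group", group)
        all_hidden_dfs.append(ranked)

    df_hidden = pd.concat(all_hidden_dfs, ignore_index=True)
    df_hidden.rename(columns={"Candidate score": "Summary Score"}, inplace=True)
    df_hidden = df_hidden.sort_values(by="Summary Score", ascending=False)
    df_hidden.reset_index(drop=True, inplace=True)
    print(df_hidden)
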
 
app/event_handlers/dropdown_candidates.py CHANGED
@@ -7,16 +7,13 @@ License: MIT License
 
 # Importing necessary components for the Gradio app
 from app.config import config_data
-from app.utils import read_csv_file, extract_profession_weights
+from app.utils import extract_profession_weights
+from app.data_init import df_traits_priority_for_professions
 from app.components import number_create_ui
 
 
 def event_handler_dropdown_candidates(practical_subtasks, dropdown_candidates):
     if practical_subtasks.lower() == "professional groups":
-        df_traits_priority_for_professions = read_csv_file(
-            config_data.Links_PROFESSIONS
-        )
-
         weights, interactive = extract_profession_weights(
             df_traits_priority_for_professions,
             dropdown_candidates,
app/event_handlers/practical_subtasks.py CHANGED
@@ -9,7 +9,8 @@ import gradio as gr
 
 # Importing necessary components for the Gradio app
 from app.config import config_data
-from app.utils import read_csv_file, extract_profession_weights, get_language_settings
+from app.utils import read_csv_file, get_language_settings
+from app.data_init import weights_professions, interactive_professions
 from app.components import number_create_ui, dropdown_create_ui
 
 
@@ -72,14 +73,6 @@ def event_handler_practical_subtasks(
             number_create_ui(visible=False),
         )
     elif practical_subtasks.lower() == "professional groups":
-        df_traits_priority_for_professions = read_csv_file(
-            config_data.Links_PROFESSIONS
-        )
-        weights_professions, interactive_professions = extract_profession_weights(
-            df_traits_priority_for_professions,
-            config_data.Settings_DROPDOWN_CANDIDATES[0],
-        )
-
         return (
            practical_subtasks_selected,
            gr.Column(visible=visible_subtasks),
app/tabs.py CHANGED
@@ -13,10 +13,11 @@ from app.description_steps import STEP_1, STEP_2
 from app.mbti_description import MBTI_DESCRIPTION, MBTI_DATA
 from app.app import APP
 from app.authors import AUTHORS
+from app.data_init import weights_professions, interactive_professions
 from app.requirements_app import read_requirements_to_df
 from app.config import config_data
 from app.practical_tasks import supported_practical_tasks
-from app.utils import read_csv_file, extract_profession_weights
+from app.utils import read_csv_file
 from app.components import (
     html_message,
     files_create_ui,
@@ -386,14 +387,6 @@ def app_tab():
                 elem_classes="dropdown-container",
             )
 
-            df_traits_priority_for_professions = read_csv_file(
-                config_data.Links_PROFESSIONS
-            )
-            weights_professions, interactive_professions = extract_profession_weights(
-                df_traits_priority_for_professions,
-                config_data.Settings_DROPDOWN_CANDIDATES[0],
-            )
-
             number_openness = number_create_ui(
                 value=weights_professions[0],
                 minimum=config_data.Values_0_100[0],
config.toml CHANGED
@@ -1,5 +1,5 @@
 [AppSettings]
-APP_VERSION = "0.10.2"
+APP_VERSION = "0.10.4"
 SERVER_NAME = "127.0.0.1"
 PORT = 7860
 CSS_PATH = "app.css"
@@ -148,7 +148,7 @@ DROPDOWN_MBTI = [
 "The Commander (ENTJ): Construction Supervisor, Health Services Administrator, Financial Accountant, Auditor, Lawyer, School Principal, Chemical Engineer, Database Manager, etc.",
 ]
 DROPDOWN_MBTI_DEL_COLS = ["EI", "SN", "TF", "JP", "Match"]
-DROPDOWN_MBTI_DEL_COLS_WEBCAM = ["EI", "SN", "TF", "JP", "Match", "Path"]
+DROPDOWN_MBTI_DEL_COLS_WEBCAM = ["Path"]
 SHOW_VIDEO_METADATA = true
 SUPPORTED_VIDEO_EXT = ["mp4", "mov", "avi", "flv"]
 TYPE_MODES = ["Files", "Web"]
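
With this change DROPDOWN_MBTI_DEL_COLS_WEBCAM no longer repeats the MBTI columns; the webcam branch in calculate_practical_tasks.py (see its diff above) now concatenates the two lists, so the combined set of dropped columns is unchanged. A quick check:

    # The new split keeps the effective column set identical to the old
    # standalone list ["EI", "SN", "TF", "JP", "Match", "Path"].
    DROPDOWN_MBTI_DEL_COLS = ["EI", "SN", "TF", "JP", "Match"]
    DROPDOWN_MBTI_DEL_COLS_WEBCAM = ["Path"]

    assert DROPDOWN_MBTI_DEL_COLS + DROPDOWN_MBTI_DEL_COLS_WEBCAM == [
        "EI", "SN", "TF", "JP", "Match", "Path",
    ]
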
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-gradio==5.6.0
+gradio==5.7.1
 PyYAML==6.0.2
 toml==0.10.2
 oceanai==1.0.0a46