API reference

components.acuity.public.admin_blueprint

PENDING_TRANSCRIPTIONS_DAYS_FROM module-attribute

PENDING_TRANSCRIPTIONS_DAYS_FROM = 100

PENDING_TRANSCRIPTIONS_LIMIT module-attribute

PENDING_TRANSCRIPTIONS_LIMIT = 1

eyecare_admin_blueprint module-attribute

eyecare_admin_blueprint = Blueprint(
    name="eyecare_admin", import_name=__name__
)
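
Usage sketch (illustrative, not part of the source): the blueprint is registered on a Flask application in the usual way; the url_prefix below is an assumption.

from flask import Flask

from components.acuity.public.admin_blueprint import eyecare_admin_blueprint

app = Flask(__name__)
# Expose the admin endpoints under an illustrative prefix.
app.register_blueprint(eyecare_admin_blueprint, url_prefix="/admin/eyecare")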

components.acuity.public.commands

DEFAULT_TEST_S3_BUCKET module-attribute

DEFAULT_TEST_S3_BUCKET = 'eu.alan.uploads'

DEFAULT_TEST_S3_KEY module-attribute

DEFAULT_TEST_S3_KEY = "eyecare/rotating-e-acuity-test-validation/default_test.json"

eyecare_commands module-attribute

eyecare_commands = AppGroup('eyecare')
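
Usage sketch (illustrative, not part of the source): attaching the AppGroup to the application CLI makes its commands available under flask eyecare ....

from flask import Flask

from components.acuity.public.commands import eyecare_commands

app = Flask(__name__)
# Register the command group on the app's click-based CLI.
app.cli.add_command(eyecare_commands)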

get_acuity_attempts_to_transcribe

get_acuity_attempts_to_transcribe(from_date, limit)

Command to get a random set of acuity attempts for human transcription.

Source code in components/acuity/public/commands.py
@eyecare_commands.command(requires_authentication=False)
@click.option("--from-date", type=click.DateTime(formats=["%Y-%m-%d"]), required=False)
@click.option("--limit", type=click.INT, default=5)
def get_acuity_attempts_to_transcribe(
    from_date: Optional[datetime], limit: int
) -> None:
    """
    Command to get a random set of acuity attempts for human transcription.
    """
    from components.acuity.internal.business_logic.audit import (
        get_speech_recognizer_audits_for_transcription,
    )

    click.echo("Searching for acuity attempts to transcribe...")

    if not from_date:
        click.echo(
            "--from-date not given. Searching attempts from the last 100 days...\n"
        )
        from_date = datetime.now() - timedelta(days=100)

    attempts = get_speech_recognizer_audits_for_transcription(
        from_date=from_date, limit=limit
    )

    if not attempts:
        click.echo("Not attempts found.")
        return None

    click.echo(
        f"Found {len(attempts)} attempts (EyecareSpeechRecognizerAudit) for transcription:"
    )
    for attempt in attempts:
        click.echo(f"- {attempt.id}")

validate_acuity_test

validate_acuity_test(notify_slack, to_stdout, tests_json)

Source code in components/acuity/public/commands.py
@eyecare_commands.command(requires_authentication=False)
@click.option("--notify-slack", is_flag=True)
@click.option("--to-stdout", is_flag=True)
@click.option("--tests-json", type=click.STRING)
def validate_acuity_test(  # noqa: D103
    notify_slack: bool, to_stdout: bool, tests_json: Optional[str]
) -> None:
    test_cases = _get_test_cases(tests_json=tests_json)

    with futures.ThreadPoolExecutor(max_workers=10) as executor:
        for test_case in test_cases:
            executor.submit(
                _execute_rotating_e_test_case,
                case=test_case,
                # See: https://flask.palletsprojects.com/en/0.12.x/reqcontext/#notes-on-proxies
                app=current_app._get_current_object(),  # type: ignore[attr-defined] # noqa: ALN027
            )

        executor.shutdown(wait=True)

    results = _get_test_results(test_cases)

    if notify_slack and get_slack_bot_token(current_app):
        _notify_slack(results)

    if to_stdout:
        _to_stdout(results)
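
The executor workers above receive the concrete application object rather than Flask's current_app proxy, because the proxy is bound to the thread handling the CLI command. A minimal, illustrative sketch of the same pattern (names are not from the source):

from concurrent import futures

from flask import Flask, current_app

def _worker(app: Flask) -> None:
    # Push an application context so current_app and config resolve in this thread.
    with app.app_context():
        print(current_app.name)

app = Flask(__name__)
with app.app_context():
    with futures.ThreadPoolExecutor(max_workers=2) as executor:
        # Unwrap the proxy: worker threads need the real app object.
        executor.submit(_worker, app=current_app._get_current_object())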

components.acuity.public.services

speech_recognizer

CONFIRM_DISTANCE module-attribute

CONFIRM_DISTANCE = SpeechRecognizerConfig(
    default_model=None,
    interaction_type=VOICE_COMMAND,
    speech_contexts=[],
)

SpeechRecognizer

SpeechRecognizer(config)

Source code in components/acuity/public/services/speech_recognizer.py
def __init__(self, config: SpeechRecognizerConfig) -> None:
    self._config = config
    self._client = speech.SpeechClient(
        credentials=self._get_credentials(),  # type: ignore[no-untyped-call]
        client_options=client_options.ClientOptions(
            api_endpoint=self._config.regional_endpoint,
        ),
    )

recognize

recognize(audio, language_code=None)

Source code in components/acuity/public/services/speech_recognizer.py
def recognize(  # type: ignore[no-untyped-def]  # noqa: D102
    self, audio, language_code: Optional[str] = None
) -> Optional[SpeechRecognizerResponseData]:
    response = self._client.recognize(
        config=self._build_recognition_config(language_code=language_code),
        audio=speech.RecognitionAudio(content=audio),
    )

    if response.results:
        transcript = ""
        confidence = 0.00
        # audio longer than 20-30 seconds is split into one or more response.results
        for result in response.results:
            transcript += result.alternatives[0].transcript
            confidence += result.alternatives[0].confidence

        return SpeechRecognizerResponseData(
            transcript=transcript,
            confidence=confidence / len(response.results),
        )

    return None
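
Usage sketch (illustrative, not part of the source), assuming Google Cloud credentials are available and the audio bytes match the format expected by the recognition config:

from components.acuity.public.services.speech_recognizer import (
    SpeechRecognizer,
    V1_CONFIGURATION,
)

recognizer = SpeechRecognizer(config=V1_CONFIGURATION)

# Raw audio bytes; the file name and encoding are assumptions for the example.
with open("sample_answer.wav", "rb") as f:
    audio = f.read()

result = recognizer.recognize(audio, language_code="fr-FR")
if result is not None:
    print(result.transcript, result.confidence)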

SpeechRecognizerConfig dataclass

SpeechRecognizerConfig(
    interaction_type,
    speech_contexts,
    default_language_code="fr-FR",
    device_type=RecognitionMetadata.RecordingDeviceType.SMARTPHONE,
    distance=RecognitionMetadata.MicrophoneDistance.FARFIELD,
    max_alternatives=0,
    enable_word_level_confidence=True,
    default_model="command_and_search",
    regional_endpoint="eu-speech.googleapis.com",
)

default_language_code class-attribute instance-attribute

default_language_code = 'fr-FR'

default_model class-attribute instance-attribute

default_model = 'command_and_search'

device_type class-attribute instance-attribute

device_type = SMARTPHONE

distance class-attribute instance-attribute

distance = FARFIELD

enable_word_level_confidence class-attribute instance-attribute

enable_word_level_confidence = True

interaction_type instance-attribute

interaction_type

max_alternatives class-attribute instance-attribute

max_alternatives = 0

regional_endpoint class-attribute instance-attribute

regional_endpoint = 'eu-speech.googleapis.com'

speech_contexts instance-attribute

speech_contexts

SpeechRecognizerResponseData dataclass

SpeechRecognizerResponseData(transcript, confidence)

confidence instance-attribute

confidence

transcript instance-attribute

transcript

V1_CONFIGURATION module-attribute

V1_CONFIGURATION = SpeechRecognizerConfig(
    default_model="latest_short",
    interaction_type=DICTATION,
    speech_contexts=[
        SpeechContext(phrases=["$OOV_CLASS_ALPHA_SEQUENCE"])
    ],
)

V2_CONFIGURATION_DOWN module-attribute

V2_CONFIGURATION_DOWN = SpeechRecognizerConfig(
    default_model="command_and_search",
    interaction_type=VOICE_COMMAND,
    speech_contexts=[
        SpeechContext(phrases=["bas"], boost=20)
    ],
)

V2_CONFIGURATION_LEFT module-attribute

V2_CONFIGURATION_LEFT = SpeechRecognizerConfig(
    default_model="latest_short",
    interaction_type=VOICE_COMMAND,
    speech_contexts=[],
)

V2_CONFIGURATION_RIGHT module-attribute

V2_CONFIGURATION_RIGHT = SpeechRecognizerConfig(
    default_model=None,
    interaction_type=VOICE_COMMAND,
    speech_contexts=[
        SpeechContext(
            phrases=[
                "droite",
                "droites",
                "droit",
                "boite",
                "toi",
                "what",
                "3",
            ],
            boost=20,
        )
    ],
)

V2_CONFIGURATION_UP module-attribute

V2_CONFIGURATION_UP = SpeechRecognizerConfig(
    default_model=None,
    interaction_type=VOICE_COMMAND,
    speech_contexts=[
        SpeechContext(
            phrases=[
                "haut",
                "oh",
                "o-h",
                "au",
                "ou",
                "ont",
                "on",
            ],
            boost=20,
        )
    ],
)

components.acuity.public.user_blueprint

eyecare_user_blueprint module-attribute

eyecare_user_blueprint = Blueprint(
    name="eyecare", import_name=__name__
)