diff --git a/examples/tutorials/forced_alignment_tutorial.py b/examples/tutorials/forced_alignment_tutorial.py
index e0c6a769f7..21f2765f6f 100644
--- a/examples/tutorials/forced_alignment_tutorial.py
+++ b/examples/tutorials/forced_alignment_tutorial.py
@@ -78,7 +78,7 @@
 # Generate frame-wise label probability
 # -------------------------------------
 #
-# The first step is to generate the label class porbability of each audio
+# The first step is to generate the label class probability of each audio
 # frame. We can use a Wav2Vec2 model that is trained for ASR. Here we use
 # :py:func:`torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H`.
 #
@@ -143,7 +143,7 @@ def plot():
 # :math:`c_j` at :math:`t` and it transitioned to the next label
 # :math:`c_{j+1}` at :math:`t+1`.
 #
-# The follwoing diagram illustrates this transition.
+# The following diagram illustrates this transition.
 #
 # .. image:: https://download.pytorch.org/torchaudio/tutorial-assets/ctc-forward.png
 #
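
For context, the first hunk describes the step where the tutorial obtains frame-wise label probabilities from the WAV2VEC2_ASR_BASE_960H pipeline. A minimal sketch of that step is shown below; it is not part of the patch, and the audio file path is a placeholder.

    import torch
    import torchaudio

    # Load the pretrained ASR pipeline referenced in the hunk above.
    bundle = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H
    model = bundle.get_model()

    # Load an utterance and resample it to the rate the bundle expects.
    # "speech.wav" is a placeholder path, not a file from the tutorial assets.
    waveform, sample_rate = torchaudio.load("speech.wav")
    waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)

    with torch.inference_mode():
        # Forward pass yields per-frame logits over the label set.
        emissions, _ = model(waveform)
        # Normalize to log-probabilities per frame (batch, frames, num_labels).
        emissions = torch.log_softmax(emissions, dim=-1)

    print(emissions.shape, bundle.get_labels())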