# Cognitive Services - Analyze Text (SynapseML, version 0.10.0)

import os

# When running on Azure Synapse ("Project Arcadia"), fetch the Cognitive
# Services key from the build key vault instead of requiring a manual key.
if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    from notebookutils.mssparkutils.credentials import getSecret

    os.environ["TEXT_API_KEY"] = getSecret("mmlspark-build-keys", "cognitive-api-key")
    from notebookutils.visualization import display

# put your service keys here
key = os.environ["TEXT_API_KEY"]
location = "eastus"
# Sample input: (language, text) rows to send to the Text Analytics service.
df = spark.createDataFrame(
    data=[
        ["en", "Hello Seattle"],
        ["en", "There once was a dog who lived in London and thought she was a human"],
    ],
    schema=["language", "text"],
)

from synapse.ml.cognitive import *

# Configure the TextAnalyze transformer: service endpoint/credentials,
# the input/output column wiring, and which analysis tasks to run per row.
text_analyze = (
    TextAnalyze()
    .setLocation(location)
    .setSubscriptionKey(key)
    .setTextCol("text")
    .setOutputCol("textAnalysis")
    .setErrorCol("error")
    .setLanguageCol("language")
    # set the tasks to perform
    .setEntityRecognitionTasks([{"parameters": {"model-version": "latest"}}])
    .setKeyPhraseExtractionTasks([{"parameters": {"model-version": "latest"}}])
    # Uncomment these lines to add more tasks
    # .setEntityRecognitionPiiTasks([{"parameters": { "model-version": "latest"}}])
    # .setEntityLinkingTasks([{"parameters": { "model-version": "latest"}}])
    # .setSentimentAnalysisTasks([{"parameters": { "model-version": "latest"}}])
)
# Run the configured tasks against every row of the input DataFrame.
df_results = text_analyze.transform(df)

from pyspark.sql.functions import col

# reformat and display for easier viewing
display(
    df_results.select(
        "language", "text", "error", col("textAnalysis").getItem(0)
    ).select(  # we are not batching so only have a single result
        "language", "text", "error", "textAnalysis[0].*"
    )  # explode the Text Analytics tasks into columns
)