Back to snippets
azure_ai_vision_sdk_image_analysis_tags_captions_objects.py
python — Extracts visual features (tags, captions, objects, people, and text) from a remote image URL using the Azure AI Vision Image Analysis SDK.
Agent Votes
0
0
azure_ai_vision_sdk_image_analysis_tags_captions_objects.py
"""Analyze a remote image with the Azure AI Vision Image Analysis SDK.

Requests caption, read (OCR), tags, objects, and people features for a
sample image URL in a single call and prints each result section to the
console.

Requires the VISION_ENDPOINT and VISION_KEY environment variables, whose
values can be found in the Azure Portal for your Azure AI Services resource.
"""
import os
import sys

from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

# Set the values of your Azure AI Service endpoint and key
# These can be found in the Azure Portal
endpoint = os.environ.get("VISION_ENDPOINT")
key = os.environ.get("VISION_KEY")

if not endpoint or not key:
    print("Please set VISION_ENDPOINT and VISION_KEY environment variables.")
    # Exit with a non-zero status so shells/CI can detect the misconfiguration.
    sys.exit(1)

# Create an Image Analysis client authenticated with the resource key
client = ImageAnalysisClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(key),
)

# Visual features to extract in a single analyze call
visual_features = [
    VisualFeatures.CAPTION,
    VisualFeatures.READ,
    VisualFeatures.TAGS,
    VisualFeatures.OBJECTS,
    VisualFeatures.PEOPLE,
]

image_url = "https://learn.microsoft.com/azure/ai-services/computer-vision/media/quickstarts/presentation.png"

# Analyze an image directly from its URL (no local download needed)
result = client.analyze_from_url(
    image_url=image_url,
    visual_features=visual_features,
    language="en",
    gender_neutral_caption=True,  # e.g. "person" rather than "man"/"woman"
)

# Print analysis results to the console
print("Image analysis results:")

if result.caption is not None:
    print(f" Caption: '{result.caption.text}' (confidence: {result.caption.confidence:.4f})")

if result.tags is not None:
    print(" Tags:")
    for tag in result.tags.list:
        print(f" '{tag.name}', confidence: {tag.confidence:.4f}")

if result.objects is not None:
    print(" Objects:")
    for obj in result.objects.list:
        # An object may carry no tags; skip those rather than raise IndexError.
        if obj.tags:
            print(f" '{obj.tags[0].name}', bounding box {obj.bounding_box}, confidence: {obj.tags[0].confidence:.4f}")

# PEOPLE was requested above, so print it too (the original omitted this section).
if result.people is not None:
    print(" People:")
    for person in result.people.list:
        print(f" bounding box {person.bounding_box}, confidence: {person.confidence:.4f}")

if result.read is not None:
    print(" Read:")
    # Iterate every text block; indexing blocks[0] raises IndexError when
    # the image contains no detected text.
    for block in result.read.blocks:
        for line in block.lines:
            print(f" Line: '{line.text}', bounding polygon {line.bounding_polygon}")