Skip to content

Simplify markdown table generation in magic command - %ai list #1251

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 43 additions & 30 deletions packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from langchain.chains import LLMChain
from langchain.schema import HumanMessage
from langchain_core.messages import AIMessage
from py_markdown_table.markdown_table import markdown_table

from ._version import __version__
from .parsers import (
Expand Down Expand Up @@ -210,13 +211,13 @@ def _ai_inline_list_models_for_provider(self, provider_id, Provider):

# Is the required environment variable set?
def _ai_env_status_for_provider_markdown(self, provider_id):
na_message = "Not applicable. | " + NA_MESSAGE
na_message = "Not applicable."

if (
provider_id not in self.providers
or self.providers[provider_id].auth_strategy == None
):
return na_message # No emoji
return na_message, NA_MESSAGE

not_set_title = ENV_NOT_SET
set_title = ENV_SET
Expand All @@ -236,15 +237,14 @@ def _ai_env_status_for_provider_markdown(self, provider_id):
not_set_title = MULTIENV_NOT_SET
set_title = MULTIENV_SET
else: # No environment variables
return na_message
return na_message, NA_MESSAGE # Not applicable, with a "?" emoji

output = f"{env_var_display} | "
if env_status_ok:
output += f'<abbr title="{set_title}">✅</abbr>'
status_emoji = f'<abbr title="{set_title}">✅</abbr>'
else:
output += f'<abbr title="{not_set_title}">❌</abbr>'
status_emoji = f'<abbr title="{not_set_title}">❌</abbr>'

return output
return env_var_display, status_emoji

def _ai_env_status_for_provider_text(self, provider_id):
# only handle providers with "env" or "multienv" auth strategy
Expand Down Expand Up @@ -351,42 +351,55 @@ def handle_update(self, args: UpdateArgs):
return TextOrMarkdown(output, output)

def _ai_list_command_markdown(self, single_provider=None):
output = (
"| Provider | Environment variable | Set? | Models |\n"
+ "|----------|----------------------|------|--------|\n"
)
provider_info_list = []

if single_provider is not None and single_provider not in self.providers:
return f"There is no model provider with ID `{single_provider}`."

for provider_id, Provider in self.providers.items():
if single_provider is not None and provider_id != single_provider:
continue

output += (
f"| `{provider_id}` | "
+ self._ai_env_status_for_provider_markdown(provider_id)
+ " | "
+ self._ai_inline_list_models_for_provider(provider_id, Provider)
+ " |\n"
env_var_display, status_emoji = self._ai_env_status_for_provider_markdown(
provider_id
)
provider_data = {
"Provider": f"`{provider_id}`",
"Environment variable": env_var_display,
"Set?": status_emoji,
"Models": self._ai_inline_list_models_for_provider(
provider_id, Provider
),
}
provider_info_list.append(provider_data)

# Also list aliases.
if single_provider is None and len(self.custom_model_registry) > 0:
output += (
"\nAliases and custom commands:\n\n"
+ "| Name | Target |\n"
+ "|------|--------|\n"
)
alias_list = []
for key, value in self.custom_model_registry.items():
output += f"| `{key}` | "
if isinstance(value, str):
output += f"`{value}`"
else:
output += "*custom chain*"
target = f"`{value}`" if isinstance(value, str) else "*custom chain*"
alias_list.append({"Name": key, "Target": target})

# Generate the markdown table for providers
providers_info_markdown_table = (
markdown_table(provider_info_list)
.set_params(quote=False, row_sep="markdown")
.get_markdown()
)

output += " |\n"
# Generate markdown table for aliases
alias_markdown_table_header = "\n\n Aliases and custom commands:\n"
alias_markdown_table = (
markdown_table(alias_list)
.set_params(quote=False, row_sep="markdown")
.get_markdown()
)
Comment on lines +389 to +395
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@keerthi-swarna Thanks for diving deep and experimenting. In the _ai_list_command_markdown() method, we use row_sep="markdown" to strictly follow the Markdown spec and allow JupyterLab to render our table in the browser. However, this fails for tables with large columns (like bedrock-custom) when displayed in IPython, since we show the literal string value of the table there.

To render a table row with a very long string in one column in IPython, you could explore using py_markdown_table and set row_sep="always" in _ai_list_command_text(). This would allow for table rows to occupy multiple lines, which would solve the issue you're reporting in IPython. The documentation page includes examples of how setting row_sep="always" can produce readable large tables in IPython. Can you try that and let me know how it goes?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Got it. However, it still has some issues with how the help text is displayed. Let us have a working session offline to close out this discussion.


return output
# Return the combined markdown tables
return (
providers_info_markdown_table
+ alias_markdown_table_header
+ alias_markdown_table
)

def _ai_list_command_text(self, single_provider=None):
output = ""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,8 @@ class BedrockCustomProvider(BaseProvider, ChatBedrock):
),
]
help = (
"- For Cross-Region Inference use the appropriate `Inference profile ID` (Model ID with a region prefix, e.g., `us.meta.llama3-2-11b-instruct-v1:0`). See the [inference profiles documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html). \n"
"- For custom/provisioned models, specify the model ARN (Amazon Resource Name) as the model ID. For more information, see the [Amazon Bedrock model IDs documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html).\n\n"
"<ul><li> For Cross-Region Inference use the appropriate `Inference profile ID` (Model ID with a region prefix, e.g., `us.meta.llama3-2-11b-instruct-v1:0`). See the [inference profiles documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html).</li></ul>"
"<ul><li> For custom/provisioned models, specify the model ARN (Amazon Resource Name) as the model ID. For more information, see the [Amazon Bedrock model IDs documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html).</li></ul>"
"The model provider must also be specified below. This is the provider of your foundation model *in lowercase*, e.g., `amazon`, `anthropic`, `cohere`, `meta`, or `mistral`."
)
registry = True
Expand Down
1 change: 1 addition & 0 deletions packages/jupyter-ai-magics/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ dependencies = [
"typing_extensions>=4.5.0",
"click~=8.0",
"jsonpath-ng>=1.5.3,<2",
"py_markdown_table>=1.3.0"
]

[project.optional-dependencies]
Expand Down
Loading